diff --git a/.gitignore b/.gitignore index 7e3b0cd6a26b755aeac4422f530c331d25a0cc43..0a116f850780386a9fe1010b22164f4c7dbf8228 100644 --- a/.gitignore +++ b/.gitignore @@ -162,6 +162,7 @@ cython_debug/ # TeraFlowSDN-generated files tfs_runtime_env_vars.sh +tfs_runtime_env_vars*.sh tfs_bchain_runtime_env_vars.sh delete_local_deployment.sh local_docker_deployment.sh diff --git a/deploy.sh b/deploy.sh index add41fa139a0127cb26d652f5b47decfe8658ad0..fa1dc2b3623255d2dac82cc1d982c607b9b6af5b 100755 --- a/deploy.sh +++ b/deploy.sh @@ -36,9 +36,13 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} # If not already set, set additional manifest files to be applied after the deployment export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""} -# If not already set, set the neew Grafana admin password +# If not already set, set the new Grafana admin password export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"} +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} + ######################################################################################################################## # Automated steps start here ######################################################################################################################## @@ -67,73 +71,75 @@ echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT for COMPONENT in $TFS_COMPONENTS; do echo "Processing '$COMPONENT' component..." - echo " Building Docker image..." - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" - - if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then - docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" - elif [ "$COMPONENT" == "pathcomp" ]; then - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" - docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" - - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" - docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" - # next command is redundant, but helpful to keep cache updated between rebuilds - IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" - docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" - elif [ "$COMPONENT" == "dlt" ]; then - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" - docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" - - BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" - docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" - else - docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" - fi + if [ "$TFS_SKIP_BUILD" != "YES" ]; then + echo " Building Docker image..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + elif [ "$COMPONENT" == "pathcomp" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" + docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . 
> "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" + docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG" + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" + docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" - if [ -n "$TFS_REGISTRY_IMAGE" ]; then - echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" + docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" + else + docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + fi - if [ "$COMPONENT" == "pathcomp" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + if [ -n "$TFS_REGISTRY_IMAGE" ]; then + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGE'..." - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" - docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + if [ "$COMPONENT" == "pathcomp" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" - docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - elif [ "$COMPONENT" == "dlt" ]; then - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" - docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" - 
TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" - docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" - else - IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGE/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') - TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" - docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" - PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" - docker push "$IMAGE_URL" > "$PUSH_LOG" + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi fi fi diff --git a/hackfest/p4/setup.sh b/hackfest/p4/setup.sh index 07fe22e6aea2341c50462010b4bfb55c4a657a47..195327a03fedafdc64a2d0dc34577766eda72a4f 100755 --- a/hackfest/p4/setup.sh +++ b/hackfest/p4/setup.sh @@ -4,5 +4,5 @@ export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}') kubectl exec ${POD_NAME} -n=tfs -- mkdir /root/p4 -kubectl cp src/tests/netx22-p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 -kubectl cp src/tests/netx22-p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 +kubectl cp hackfest/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 +kubectl cp hackfest/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 diff --git a/hackfest/p4/tests/Objects.py b/hackfest/p4/tests/Objects.py index 09b3aced843a198b7c963a34492a4fe2379c9123..c8b172244d714cd699ccc587e54c3751485a9a2e 100644 --- a/hackfest/p4/tests/Objects.py +++ b/hackfest/p4/tests/Objects.py @@ -1,4 +1,5 @@ # Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -42,6 +43,8 @@ PACKET_PORT_SAMPLE_TYPES = [ KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, ] +# ----- Device Credentials and Settings -------------------------------------------------------------------------------- + # ----- Devices -------------------------------------------------------------------------------------------------------- @@ -54,7 +57,7 @@ DEVICE_SW1 = json_device_p4_disabled(DEVICE_SW1_UUID) DEVICE_SW1_DPID = 1 DEVICE_SW1_NAME = DEVICE_SW1_UUID -DEVICE_SW1_IP_ADDR = '10.0.2.10' +DEVICE_SW1_IP_ADDR = 'localhost' DEVICE_SW1_PORT = '50001' DEVICE_SW1_VENDOR = 'Open Networking Foundation' DEVICE_SW1_HW_VER = 'BMv2 simple_switch' @@ -78,9 +81,38 @@ DEVICE_SW1_CONNECT_RULES = json_device_connect_rules( } ) +DEVICE_SW2_UUID = 'SW2' +DEVICE_SW2_TIMEOUT = 60 +DEVICE_SW2_ID = json_device_id(DEVICE_SW2_UUID) +DEVICE_SW2 = json_device_p4_disabled(DEVICE_SW2_UUID) -################################## TABLE ENTRIES ################################## +DEVICE_SW2_DPID = 1 +DEVICE_SW2_NAME = DEVICE_SW2_UUID +DEVICE_SW2_IP_ADDR = 'localhost' +DEVICE_SW2_PORT = '50002' +DEVICE_SW2_VENDOR = 'Open Networking Foundation' +DEVICE_SW2_HW_VER = 'BMv2 simple_switch' +DEVICE_SW2_SW_VER = 'Stratum' +DEVICE_SW2_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW2_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW2_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW2_IP_ADDR, + DEVICE_SW2_PORT, + { + 'id': DEVICE_SW2_DPID, + 'name': DEVICE_SW2_NAME, + 'vendor': DEVICE_SW2_VENDOR, + 'hw_ver': DEVICE_SW2_HW_VER, + 'sw_ver': DEVICE_SW2_SW_VER, + 'timeout': DEVICE_SW2_TIMEOUT, + 'p4bin': DEVICE_SW2_BIN_PATH, + 'p4info': DEVICE_SW2_INFO_PATH + } +) + +################################## TABLE ENTRIES ################################## DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ json_config_rule_set( @@ -123,6 +155,8 @@ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ ) ] +DEVICE_SW2_CONFIG_TABLE_ENTRIES = DEVICE_SW1_CONFIG_TABLE_ENTRIES + """ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ @@ -171,7 +205,6 @@ DEVICE_SW1_CONFIG_TABLE_ENTRIES = [ ################################## TABLE DECONF ################################## - DEVICE_SW1_DECONF_TABLE_ENTRIES = [ json_config_rule_delete( 'table', @@ -213,6 +246,7 @@ DEVICE_SW1_DECONF_TABLE_ENTRIES = [ ) ] +DEVICE_SW2_DECONF_TABLE_ENTRIES = DEVICE_SW1_DECONF_TABLE_ENTRIES """ @@ -271,6 +305,7 @@ TOPOLOGIES = [TOPOLOGY] DEVICES = [ (DEVICE_SW1, DEVICE_SW1_CONNECT_RULES, DEVICE_SW1_CONFIG_TABLE_ENTRIES, DEVICE_SW1_DECONF_TABLE_ENTRIES), + (DEVICE_SW2, DEVICE_SW2_CONNECT_RULES, DEVICE_SW2_CONFIG_TABLE_ENTRIES, DEVICE_SW2_DECONF_TABLE_ENTRIES), ] LINKS = [] diff --git a/hackfest/p4/tests/test_functional_cleanup.py b/hackfest/p4/tests/test_functional_cleanup.py index 32f716f1c2287b11bae3610022d64659d82ba73d..ccbcb9843a03bbf095743af0753da3fe8af3bfce 100644 --- a/hackfest/p4/tests/test_functional_cleanup.py +++ b/hackfest/p4/tests/test_functional_cleanup.py @@ -54,8 +54,8 @@ def test_scenario_cleanup( device_client.DeleteDevice(DeviceId(**device_id)) #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ for topology in TOPOLOGIES: diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 
04da586dfeb25a01a6f5267aa31441498ce4f2cc..5c07971a328a389473899375f2d2aad9031f473e 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -34,10 +34,10 @@ spec: - containerPort: 6379 resources: requests: - cpu: 250m - memory: 512Mi + cpu: 100m + memory: 128Mi limits: - cpu: 700m + cpu: 500m memory: 1024Mi - name: server image: registry.gitlab.com/teraflow-h2020/controller/context:latest @@ -64,11 +64,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:1010"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 171394f7c43b2447e898902c78d5276fe1bcbc7c..d2595ab1915554d7ebfd786b8f39b531e40da490 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:2020"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 5ef6eae7de6cb7c839b0cb17e65c8b3f045c1d66..d2ad4f40444faa6b9de7724f8b3df077bb7910b2 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -35,6 +35,11 @@ spec: env: - name: LOG_LEVEL value: "INFO" + ## for debug purposes + #- name: DLT_GATEWAY_HOST + # value: "mock-blockchain.tfs-bchain.svc.cluster.local" + #- name: DLT_GATEWAY_PORT + # value: "50051" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:8080"] @@ -43,14 +48,16 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:8080"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi - name: gateway image: registry.gitlab.com/teraflow-h2020/controller/dlt-gateway:latest imagePullPolicy: Always + ports: + - containerPort: 50051 #readinessProbe: # httpGet: # path: /health @@ -65,7 +72,7 @@ spec: # timeoutSeconds: 5 resources: requests: - cpu: 250m + cpu: 200m memory: 512Mi limits: cpu: 700m diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index ca30da0101659f801440af343e42851146d17bda..3ef3ffba301cadf26beaa34787dcd816e87c65a0 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 10010 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10010"] @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:10010"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml index b383d7db42be9eb3c9dc7758c230f5250eb43db1..bf9abac703b263ad6a843f0d70848dde94a4ab97 100644 --- a/manifests/mock_blockchain.yaml +++ b/manifests/mock_blockchain.yaml @@ -34,7 +34,7 @@ spec: - containerPort: 50051 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:50051"] @@ -43,7 +43,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:50051"] resources: requests: - cpu: 250m + cpu: 100m memory: 512Mi limits: cpu: 700m diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 
d5939cb154443139be88d8e0ac23c281a3b18c4d..92e24ac42b7b86be6056709abd9a2cd6fc16598b 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:10020"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi - name: backend image: registry.gitlab.com/teraflow-h2020/controller/pathcomp-backend:latest imagePullPolicy: Always @@ -65,8 +65,8 @@ spec: # timeoutSeconds: 5 resources: requests: - cpu: 250m - memory: 512Mi + cpu: 100m + memory: 256Mi limits: cpu: 700m memory: 1024Mi diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 75832b94fa2a6ba97617641e7b249157508614bf..a5568a5112eb08a02df2178ba45db57b57c19cc3 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:3030"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 8c76618a96fa6cc7b83bc6ebf52062958a2a3689..b20669b0c03cc22857abd1534e19780025b9066a 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -43,11 +43,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:4040"] resources: requests: - cpu: 250m - memory: 512Mi + cpu: 50m + memory: 64Mi limits: - cpu: 700m - memory: 1024Mi + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index cac64a816075f1a0ad91a21c519463aa5cd8f973..7f70e837c4b6b979477a3a02db6e744b41387d73 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -38,7 +38,7 @@ spec: - containerPort: 8004 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: WEBUISERVICE_SERVICE_BASEURL_HTTP value: "/webui/" readinessProbe: @@ -55,7 +55,7 @@ spec: timeoutSeconds: 1 resources: requests: - cpu: 250m + cpu: 100m memory: 512Mi limits: cpu: 700m diff --git a/my_deploy.sh b/my_deploy.sh index e70a12e1556ab06f6daa89c316c6a6ed61c4e059..ffd91da35186fe21f418950493ef797a9af1b522 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -18,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" -# Set the neew Grafana admin password +# Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. 
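Taken together, the manifest edits above move the resource footprint in one consistent direction: the single-container gRPC services drop from 250m CPU / 512Mi memory requests to 50m / 64Mi, and from 700m / 1024Mi limits to 500m / 512Mi. For the seven services touched (context server, device, dlt connector, interdomain, pathcomp frontend, service, slice) that is roughly 1750m -> 350m of requested CPU and about 3.5 GiB -> 450 MiB of requested memory, so the controller fits comfortably on a single-node development cluster; combined with the TFS_SKIP_BUILD flag exported just below, redeploy cycles become much cheaper.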
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} diff --git a/nfvsdn22 b/nfvsdn22 new file mode 120000 index 0000000000000000000000000000000000000000..ac93a84be42e09c11106c5e0836bb4e51cc1fa1a --- /dev/null +++ b/nfvsdn22 @@ -0,0 +1 @@ +src/tests/nfvsdn22/ \ No newline at end of file diff --git a/proto/context.proto b/proto/context.proto index 5b49bd28866af919332ab7188bbf66203e8b766d..3f0532d231535c2e59c798cbc9a6b1c92e1eb4bf 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -158,6 +158,11 @@ message Device { DeviceOperationalStatusEnum device_operational_status = 4; repeated DeviceDriverEnum device_drivers = 5; repeated EndPoint device_endpoints = 6; + repeated Component component = 7; // Used for inventory +} + +message Component { + repeated string comp_string = 1; } message DeviceConfig { diff --git a/proto/dlt_connector.proto b/proto/dlt_connector.proto index c8cbeb663fafb3c133092e9c49c2ece3f59d75ae..1038d6ccd40c8393313fc7f8dbfd48b1e0cf1739 100644 --- a/proto/dlt_connector.proto +++ b/proto/dlt_connector.proto @@ -18,14 +18,37 @@ package dlt; import "context.proto"; service DltConnectorService { - rpc RecordAll (context.Empty ) returns (context.Empty) {} + rpc RecordAll (context.TopologyId) returns (context.Empty) {} - rpc RecordAllDevices (context.Empty ) returns (context.Empty) {} - rpc RecordDevice (context.DeviceId ) returns (context.Empty) {} + rpc RecordAllDevices (context.TopologyId) returns (context.Empty) {} + rpc RecordDevice (DltDeviceId ) returns (context.Empty) {} - rpc RecordAllServices(context.Empty ) returns (context.Empty) {} - rpc RecordService (context.ServiceId) returns (context.Empty) {} + rpc RecordAllLinks (context.TopologyId) returns (context.Empty) {} + rpc RecordLink (DltLinkId ) returns (context.Empty) {} - rpc RecordAllSlices (context.Empty ) returns (context.Empty) {} - rpc RecordSlice (context.SliceId ) returns (context.Empty) {} + rpc RecordAllServices(context.TopologyId) returns (context.Empty) {} + rpc RecordService (DltServiceId ) returns (context.Empty) {} + + rpc RecordAllSlices (context.TopologyId) returns (context.Empty) {} + rpc RecordSlice (DltSliceId ) returns (context.Empty) {} +} + +message DltDeviceId { + context.TopologyId topology_id = 1; + context.DeviceId device_id = 2; +} + +message DltLinkId { + context.TopologyId topology_id = 1; + context.LinkId link_id = 2; +} + +message DltServiceId { + context.TopologyId topology_id = 1; + context.ServiceId service_id = 2; +} + +message DltSliceId { + context.TopologyId topology_id = 1; + context.SliceId slice_id = 2; } diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto index 7445a0f25a57df9793bd8761da024581988cf9e6..4419a8df4a22047d8708c5cf2e2c3657148b5eeb 100644 --- a/proto/kpi_sample_types.proto +++ b/proto/kpi_sample_types.proto @@ -16,9 +16,19 @@ syntax = "proto3"; package kpi_sample_types; enum KpiSampleType { - KPISAMPLETYPE_UNKNOWN = 0; - KPISAMPLETYPE_PACKETS_TRANSMITTED = 101; - KPISAMPLETYPE_PACKETS_RECEIVED = 102; - KPISAMPLETYPE_BYTES_TRANSMITTED = 201; - KPISAMPLETYPE_BYTES_RECEIVED = 202; + KPISAMPLETYPE_UNKNOWN = 0; + KPISAMPLETYPE_PACKETS_TRANSMITTED = 101; + KPISAMPLETYPE_PACKETS_RECEIVED = 102; + KPISAMPLETYPE_PACKETS_DROPPED = 103; + KPISAMPLETYPE_BYTES_TRANSMITTED = 201; + KPISAMPLETYPE_BYTES_RECEIVED = 202; + KPISAMPLETYPE_BYTES_DROPPED = 203; + KPISAMPLETYPE_ML_CONFIDENCE = 401; //. can be used by both optical and L3 without any issue + KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. 
can be used by both optical and L3 without any issue + KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; + KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602; + KPISAMPLETYPE_L3_UNIQUE_ATTACKERS = 603; + KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS = 604; + KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO = 605; + KPISAMPLETYPE_SERVICE_LATENCY_MS = 701; } diff --git a/proto/monitoring.proto b/proto/monitoring.proto index 9be39db909d915b2a9b5d99b01841db028959543..f9c408c96ced121f35cc1116bf64d013e7320e6a 100644 --- a/proto/monitoring.proto +++ b/proto/monitoring.proto @@ -25,7 +25,7 @@ service MonitoringService { rpc GetKpiDescriptorList (context.Empty ) returns (KpiDescriptorList ) {} // Stable and final rpc IncludeKpi (Kpi ) returns (context.Empty ) {} // Stable and final rpc MonitorKpi (MonitorKpiRequest ) returns (context.Empty ) {} // Stable and final - rpc QueryKpiData (KpiQuery ) returns (KpiList ) {} // Not implemented + rpc QueryKpiData (KpiQuery ) returns (RawKpiTable ) {} // Not implemented rpc SetKpiSubscription (SubsDescriptor ) returns (stream SubsResponse ) {} // Stable not final rpc GetSubsDescriptor (SubscriptionID ) returns (SubsDescriptor ) {} // Stable and final rpc GetSubscriptions (context.Empty ) returns (SubsList ) {} // Stable and final @@ -36,7 +36,7 @@ service MonitoringService { rpc GetAlarmResponseStream(AlarmSubscription ) returns (stream AlarmResponse) {} // Not Stable not final rpc DeleteAlarm (AlarmID ) returns (context.Empty ) {} // Stable and final rpc GetStreamKpi (KpiId ) returns (stream Kpi ) {} // Stable not final - rpc GetInstantKpi (KpiId ) returns (Kpi ) {} // Stable not final + rpc GetInstantKpi (KpiId ) returns (Kpi ) {} // Stable not final } message KpiDescriptor { @@ -48,6 +48,7 @@ message KpiDescriptor { context.EndPointId endpoint_id = 6; context.ServiceId service_id = 7; context.SliceId slice_id = 8; + context.ConnectionId connection_id = 9; } message MonitorKpiRequest { @@ -58,13 +59,26 @@ message MonitorKpiRequest { } message KpiQuery { - KpiId kpi_id = 1; + repeated KpiId kpi_ids = 1; float monitoring_window_s = 2; - float sampling_rate_s = 3; - uint32 last_n_samples = 4; // used when you want something like "get the last N many samples - context.Timestamp start_timestamp = 5; // used when you want something like "get the samples since X date/time" - context.Timestamp end_timestamp = 6; // used when you want something like "get the samples until X date/time" - // Pending add field to reflect Available Device Protocols + uint32 last_n_samples = 3; // used when you want something like "get the last N many samples + context.Timestamp start_timestamp = 4; // used when you want something like "get the samples since X date/time" + context.Timestamp end_timestamp = 5; // used when you want something like "get the samples until X date/time" +} + + +message RawKpi { // cell + context.Timestamp timestamp = 1; + KpiValue kpi_value = 2; +} + +message RawKpiList { // column + KpiId kpi_id = 1; + repeated RawKpi raw_kpis = 2; +} + +message RawKpiTable { // table + repeated RawKpiList raw_kpi_lists = 1; } message KpiId { diff --git a/proto/policy.proto b/proto/policy.proto index d8e51caea2231e21b982771e7a4d63f3db93471c..9d0c34a3304f68c47a19ac56d0e96b10936bee7b 100644 --- a/proto/policy.proto +++ b/proto/policy.proto @@ -109,5 +109,5 @@ message PolicyRuleDeviceList { // A list of policy rules message PolicyRuleList { - repeated PolicyRuleId policyRules = 1; + repeated PolicyRule policyRules = 1; } diff --git a/scripts/show_logs_dlt_connector.sh 
b/scripts/show_logs_dlt_connector.sh new file mode 100755 index 0000000000000000000000000000000000000000..db4c388c20399007ba10b357a5e153df4a86c519 --- /dev/null +++ b/scripts/show_logs_dlt_connector.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/dltservice -c connector diff --git a/scripts/show_logs_dlt_gateway.sh b/scripts/show_logs_dlt_gateway.sh new file mode 100755 index 0000000000000000000000000000000000000000..c00be2df16cb69b3ace501a854d1248a72abbf3e --- /dev/null +++ b/scripts/show_logs_dlt_gateway.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. 
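These two helper scripts tail the connector and gateway containers of the dltservice pod, the service whose gRPC surface is reworked in proto/dlt_connector.proto above: every Record* RPC is now scoped to a topology instead of taking context.Empty, and the per-entity RPCs receive Dlt*Id wrappers. A hedged Python client sketch (the channel address is illustrative, and the context.proto field names are assumed to follow the usual ContextId/Uuid layout):

import grpc
from context_pb2 import ContextId, DeviceId, TopologyId, Uuid
from dlt_connector_pb2 import DltDeviceId
from dlt_connector_pb2_grpc import DltConnectorServiceStub

stub = DltConnectorServiceStub(grpc.insecure_channel("dltservice:8080"))

# Bulk recording is now per-topology instead of "everything in the system".
topology_id = TopologyId(
    context_id=ContextId(context_uuid=Uuid(uuid="admin")),
    topology_uuid=Uuid(uuid="admin"))
stub.RecordAllDevices(topology_id)

# Per-entity RPCs pair the entity id with the topology it belongs to.
stub.RecordDevice(DltDeviceId(
    topology_id=topology_id,
    device_id=DeviceId(device_uuid=Uuid(uuid="SW1"))))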
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/dltservice -c gateway diff --git a/scripts/show_logs_monitoring.sh b/scripts/show_logs_monitoring.sh index 520a9da1c652553eb90acd083caf5724275f4efe..faa825fdfae2bb85f0790a877b75d533ff5aa0d5 100755 --- a/scripts/show_logs_monitoring.sh +++ b/scripts/show_logs_monitoring.sh @@ -24,4 +24,4 @@ export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} # Automated steps start here ######################################################################################################################## -kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringserver +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice server diff --git a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java index 6d672fdea2c3e97f9f2a50c7efa8d77c05532357..466ebf7fefe69645cd23c72ca3b61d8e5bdd4713 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java +++ b/src/automation/src/main/java/eu/teraflow/automation/AutomationGatewayImpl.java @@ -76,12 +76,11 @@ public class AutomationGatewayImpl implements AutomationGateway { @Override public Uni<Automation.DeviceRoleState> ztpDelete(Automation.DeviceRole request) { - return Uni.createFrom() - .item( - () -> - Automation.DeviceRoleState.newBuilder() - .setDevRoleId(request.getDevRoleId()) - .build()); + final var devRoleId = request.getDevRoleId().getDevRoleId().getUuid(); + return automationService + .deleteDevice(devRoleId) + .onItem() + .transform(device -> transformToDeviceRoleState(device, devRoleId, DeviceState.DELETED)); } @Override diff --git a/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java b/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java index 2fc3a3356456b3c1bc55137f686a7e82570a3171..76e536d9bc03c196005a91be1c82d15879a6f8e8 100644 --- a/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java +++ b/src/automation/src/main/java/eu/teraflow/automation/ContextSubscriber.java @@ -79,8 +79,8 @@ public class ContextSubscriber { break; case UPDATE: LOGGER.warnf( - "Received %s for device [%s]. " + - "No automation action on an already updated device", + "Received %s for device [%s]. 
" + + "No automation action on an already updated device", event, deviceId); break; case UNDEFINED: diff --git a/src/automation/src/test/java/eu/teraflow/automation/AutomationServiceTest.java b/src/automation/src/test/java/eu/teraflow/automation/AutomationServiceTest.java index 85ed170efbf938b11303d92a6697c89836e0bf70..e17fc8304455a543ffa96b4c1239abb77be4daca 100644 --- a/src/automation/src/test/java/eu/teraflow/automation/AutomationServiceTest.java +++ b/src/automation/src/test/java/eu/teraflow/automation/AutomationServiceTest.java @@ -280,6 +280,53 @@ class AutomationServiceTest { final var uuid = serializer.serializeUuid(UUID_VALUE); final var deviceRoleId = Automation.DeviceRoleId.newBuilder().setDevRoleId(uuid).build(); final var deviceRole = Automation.DeviceRole.newBuilder().setDevRoleId(deviceRoleId).build(); + final var DEVICE_ID = "0f14d0ab-9608-7862-a9e4-5ed26688389b"; + final var DEVICE_ROLE_ID = "0f14d0ab-9608-7862-a9e4-5ed26688389a"; + final var DEVICE_TYPE = "ztp"; + + final var deviceDrivers = List.of(DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.P4); + + final var topologyIdA = new TopologyId("contextIdA", "idA"); + final var deviceIdA = "deviceIdA"; + final var idA = "idA"; + final var endPointIdA = new EndPointId(topologyIdA, deviceIdA, idA); + + final var endPointTypeA = "endPointTypeA"; + final var kpiSampleTypesA = + List.of(KpiSampleType.BYTES_RECEIVED, KpiSampleType.BYTES_TRANSMITTED); + final var locationTypeRegionA = new LocationTypeRegion("ATH"); + final var locationA = new Location(locationTypeRegionA); + final var endPointA = + new EndPointBuilder(endPointIdA, endPointTypeA, kpiSampleTypesA) + .location(locationA) + .build(); + + final var topologyIdB = new TopologyId("contextIdB", "idB"); + final var deviceIdB = "deviceIdB"; + final var idB = "idB"; + final var endPointIdB = new EndPointId(topologyIdB, deviceIdB, idB); + final var endPointTypeB = "endPointTypeB"; + final var kpiSampleTypesB = + List.of(KpiSampleType.BYTES_RECEIVED, KpiSampleType.BYTES_TRANSMITTED); + final var locationTypeRegionB = new LocationTypeRegion("ATH"); + final var locationB = new Location(locationTypeRegionB); + final var endPointB = + new EndPointBuilder(endPointIdB, endPointTypeB, kpiSampleTypesB) + .location(locationB) + .build(); + + final var endPoints = List.of(endPointA, endPointB); + + final var emptyDeviceConfig = new DeviceConfig(List.of()); + final var device = + new Device( + DEVICE_ID, + DEVICE_TYPE, + emptyDeviceConfig, + DeviceOperationalStatus.ENABLED, + deviceDrivers, + endPoints); + Mockito.when(contextGateway.getDevice(Mockito.any())).thenReturn(Uni.createFrom().item(device)); client .ztpDelete(deviceRole) diff --git a/src/automation/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java b/src/automation/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java index 67e1ec736f9d83cbf95b419e9e61e92e82e73b88..217672b2e8de2d7c840833a937b0fb04c38a221b 100644 --- a/src/automation/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java +++ b/src/automation/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java @@ -31,6 +31,10 @@ public final class KpiSampleTypes { * <code>KPISAMPLETYPE_PACKETS_RECEIVED = 102;</code> */ KPISAMPLETYPE_PACKETS_RECEIVED(102), + /** + * <code>KPISAMPLETYPE_PACKETS_DROPPED = 103;</code> + */ + KPISAMPLETYPE_PACKETS_DROPPED(103), /** * <code>KPISAMPLETYPE_BYTES_TRANSMITTED = 201;</code> */ @@ -39,6 +43,50 @@ public final class KpiSampleTypes { * <code>KPISAMPLETYPE_BYTES_RECEIVED = 
202;</code> */ KPISAMPLETYPE_BYTES_RECEIVED(202), + /** + * <code>KPISAMPLETYPE_BYTES_DROPPED = 203;</code> + */ + KPISAMPLETYPE_BYTES_DROPPED(203), + /** + * <pre> + *. can be used by both optical and L3 without any issue + * </pre> + * + * <code>KPISAMPLETYPE_ML_CONFIDENCE = 401;</code> + */ + KPISAMPLETYPE_ML_CONFIDENCE(401), + /** + * <pre> + *. can be used by both optical and L3 without any issue + * </pre> + * + * <code>KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501;</code> + */ + KPISAMPLETYPE_OPTICAL_SECURITY_STATUS(501), + /** + * <code>KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601;</code> + */ + KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS(601), + /** + * <code>KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602;</code> + */ + KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS(602), + /** + * <code>KPISAMPLETYPE_L3_UNIQUE_ATTACKERS = 603;</code> + */ + KPISAMPLETYPE_L3_UNIQUE_ATTACKERS(603), + /** + * <code>KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS = 604;</code> + */ + KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS(604), + /** + * <code>KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO = 605;</code> + */ + KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO(605), + /** + * <code>KPISAMPLETYPE_SERVICE_LATENCY_MS = 701;</code> + */ + KPISAMPLETYPE_SERVICE_LATENCY_MS(701), UNRECOGNIZED(-1), ; @@ -54,6 +102,10 @@ public final class KpiSampleTypes { * <code>KPISAMPLETYPE_PACKETS_RECEIVED = 102;</code> */ public static final int KPISAMPLETYPE_PACKETS_RECEIVED_VALUE = 102; + /** + * <code>KPISAMPLETYPE_PACKETS_DROPPED = 103;</code> + */ + public static final int KPISAMPLETYPE_PACKETS_DROPPED_VALUE = 103; /** * <code>KPISAMPLETYPE_BYTES_TRANSMITTED = 201;</code> */ @@ -62,6 +114,50 @@ public final class KpiSampleTypes { * <code>KPISAMPLETYPE_BYTES_RECEIVED = 202;</code> */ public static final int KPISAMPLETYPE_BYTES_RECEIVED_VALUE = 202; + /** + * <code>KPISAMPLETYPE_BYTES_DROPPED = 203;</code> + */ + public static final int KPISAMPLETYPE_BYTES_DROPPED_VALUE = 203; + /** + * <pre> + *. can be used by both optical and L3 without any issue + * </pre> + * + * <code>KPISAMPLETYPE_ML_CONFIDENCE = 401;</code> + */ + public static final int KPISAMPLETYPE_ML_CONFIDENCE_VALUE = 401; + /** + * <pre> + *. 
can be used by both optical and L3 without any issue + * </pre> + * + * <code>KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501;</code> + */ + public static final int KPISAMPLETYPE_OPTICAL_SECURITY_STATUS_VALUE = 501; + /** + * <code>KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601;</code> + */ + public static final int KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS_VALUE = 601; + /** + * <code>KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602;</code> + */ + public static final int KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS_VALUE = 602; + /** + * <code>KPISAMPLETYPE_L3_UNIQUE_ATTACKERS = 603;</code> + */ + public static final int KPISAMPLETYPE_L3_UNIQUE_ATTACKERS_VALUE = 603; + /** + * <code>KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS = 604;</code> + */ + public static final int KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS_VALUE = 604; + /** + * <code>KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO = 605;</code> + */ + public static final int KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO_VALUE = 605; + /** + * <code>KPISAMPLETYPE_SERVICE_LATENCY_MS = 701;</code> + */ + public static final int KPISAMPLETYPE_SERVICE_LATENCY_MS_VALUE = 701; public final int getNumber() { @@ -91,8 +187,18 @@ public final class KpiSampleTypes { case 0: return KPISAMPLETYPE_UNKNOWN; case 101: return KPISAMPLETYPE_PACKETS_TRANSMITTED; case 102: return KPISAMPLETYPE_PACKETS_RECEIVED; + case 103: return KPISAMPLETYPE_PACKETS_DROPPED; case 201: return KPISAMPLETYPE_BYTES_TRANSMITTED; case 202: return KPISAMPLETYPE_BYTES_RECEIVED; + case 203: return KPISAMPLETYPE_BYTES_DROPPED; + case 401: return KPISAMPLETYPE_ML_CONFIDENCE; + case 501: return KPISAMPLETYPE_OPTICAL_SECURITY_STATUS; + case 601: return KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS; + case 602: return KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS; + case 603: return KPISAMPLETYPE_L3_UNIQUE_ATTACKERS; + case 604: return KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CLIENTS; + case 605: return KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO; + case 701: return KPISAMPLETYPE_SERVICE_LATENCY_MS; default: return null; } } @@ -159,12 +265,22 @@ public final class KpiSampleTypes { static { java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + - "es*\276\001\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + + "es*\327\004\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + - "\020f\022$\n\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001" + - "\022!\n\034KPISAMPLETYPE_BYTES_RECEIVED\020\312\001b\006pro" + - "to3" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + + "PLETYPE_BYTES_DROPPED\020\313\001\022 \n\033KPISAMPLETYP" + + "E_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLETYPE_OPTI" + + "CAL_SECURITY_STATUS\020\365\003\022)\n$KPISAMPLETYPE_" + + "L3_UNIQUE_ATTACK_CONNS\020\331\004\022*\n%KPISAMPLETY" + + "PE_L3_TOTAL_DROPPED_PACKTS\020\332\004\022&\n!KPISAMP" + + "LETYPE_L3_UNIQUE_ATTACKERS\020\333\004\0220\n+KPISAMP" + + "LETYPE_L3_UNIQUE_COMPROMISED_CLIENTS\020\334\004\022" + + ",\n\'KPISAMPLETYPE_L3_SECURITY_STATUS_CRYP" + + "TO\020\335\004\022%\n KPISAMPLETYPE_SERVICE_LATENCY_M" + + "S\020\275\005b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/src/automation/target/generated-sources/grpc/monitoring/Monitoring.java 
b/src/automation/target/generated-sources/grpc/monitoring/Monitoring.java index 9d05f3da8a831e74922e65473206539680c8d78b..38f026eb1ac730e8f825e460916dc57469f0d312 100644 --- a/src/automation/target/generated-sources/grpc/monitoring/Monitoring.java +++ b/src/automation/target/generated-sources/grpc/monitoring/Monitoring.java @@ -139,6 +139,21 @@ public final class Monitoring { * <code>.context.SliceId slice_id = 8;</code> */ context.ContextOuterClass.SliceIdOrBuilder getSliceIdOrBuilder(); + + /** + * <code>.context.ConnectionId connection_id = 9;</code> + * @return Whether the connectionId field is set. + */ + boolean hasConnectionId(); + /** + * <code>.context.ConnectionId connection_id = 9;</code> + * @return The connectionId. + */ + context.ContextOuterClass.ConnectionId getConnectionId(); + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + context.ContextOuterClass.ConnectionIdOrBuilder getConnectionIdOrBuilder(); } /** * Protobuf type {@code monitoring.KpiDescriptor} @@ -275,6 +290,19 @@ public final class Monitoring { break; } + case 74: { + context.ContextOuterClass.ConnectionId.Builder subBuilder = null; + if (connectionId_ != null) { + subBuilder = connectionId_.toBuilder(); + } + connectionId_ = input.readMessage(context.ContextOuterClass.ConnectionId.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(connectionId_); + connectionId_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -537,6 +565,32 @@ public final class Monitoring { return getSliceId(); } + public static final int CONNECTION_ID_FIELD_NUMBER = 9; + private context.ContextOuterClass.ConnectionId connectionId_; + /** + * <code>.context.ConnectionId connection_id = 9;</code> + * @return Whether the connectionId field is set. + */ + @java.lang.Override + public boolean hasConnectionId() { + return connectionId_ != null; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + * @return The connectionId. + */ + @java.lang.Override + public context.ContextOuterClass.ConnectionId getConnectionId() { + return connectionId_ == null ? 
context.ContextOuterClass.ConnectionId.getDefaultInstance() : connectionId_; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + @java.lang.Override + public context.ContextOuterClass.ConnectionIdOrBuilder getConnectionIdOrBuilder() { + return getConnectionId(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -575,6 +629,9 @@ public final class Monitoring { if (sliceId_ != null) { output.writeMessage(8, getSliceId()); } + if (connectionId_ != null) { + output.writeMessage(9, getConnectionId()); + } unknownFields.writeTo(output); } @@ -615,6 +672,10 @@ public final class Monitoring { size += com.google.protobuf.CodedOutputStream .computeMessageSize(8, getSliceId()); } + if (connectionId_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, getConnectionId()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -660,6 +721,11 @@ public final class Monitoring { if (!getSliceId() .equals(other.getSliceId())) return false; } + if (hasConnectionId() != other.hasConnectionId()) return false; + if (hasConnectionId()) { + if (!getConnectionId() + .equals(other.getConnectionId())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -699,6 +765,10 @@ public final class Monitoring { hash = (37 * hash) + SLICE_ID_FIELD_NUMBER; hash = (53 * hash) + getSliceId().hashCode(); } + if (hasConnectionId()) { + hash = (37 * hash) + CONNECTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getConnectionId().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -873,6 +943,12 @@ public final class Monitoring { sliceId_ = null; sliceIdBuilder_ = null; } + if (connectionIdBuilder_ == null) { + connectionId_ = null; + } else { + connectionId_ = null; + connectionIdBuilder_ = null; + } return this; } @@ -936,6 +1012,11 @@ public final class Monitoring { } else { result.sliceId_ = sliceIdBuilder_.build(); } + if (connectionIdBuilder_ == null) { + result.connectionId_ = connectionId_; + } else { + result.connectionId_ = connectionIdBuilder_.build(); + } onBuilt(); return result; } @@ -1032,6 +1113,9 @@ public final class Monitoring { if (other.hasSliceId()) { mergeSliceId(other.getSliceId()); } + if (other.hasConnectionId()) { + mergeConnectionId(other.getConnectionId()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -2026,6 +2110,125 @@ public final class Monitoring { } return sliceIdBuilder_; } + + private context.ContextOuterClass.ConnectionId connectionId_; + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.ConnectionId, context.ContextOuterClass.ConnectionId.Builder, context.ContextOuterClass.ConnectionIdOrBuilder> connectionIdBuilder_; + /** + * <code>.context.ConnectionId connection_id = 9;</code> + * @return Whether the connectionId field is set. + */ + public boolean hasConnectionId() { + return connectionIdBuilder_ != null || connectionId_ != null; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + * @return The connectionId. + */ + public context.ContextOuterClass.ConnectionId getConnectionId() { + if (connectionIdBuilder_ == null) { + return connectionId_ == null ? 
context.ContextOuterClass.ConnectionId.getDefaultInstance() : connectionId_; + } else { + return connectionIdBuilder_.getMessage(); + } + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + public Builder setConnectionId(context.ContextOuterClass.ConnectionId value) { + if (connectionIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + connectionId_ = value; + onChanged(); + } else { + connectionIdBuilder_.setMessage(value); + } + + return this; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + public Builder setConnectionId( + context.ContextOuterClass.ConnectionId.Builder builderForValue) { + if (connectionIdBuilder_ == null) { + connectionId_ = builderForValue.build(); + onChanged(); + } else { + connectionIdBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + public Builder mergeConnectionId(context.ContextOuterClass.ConnectionId value) { + if (connectionIdBuilder_ == null) { + if (connectionId_ != null) { + connectionId_ = + context.ContextOuterClass.ConnectionId.newBuilder(connectionId_).mergeFrom(value).buildPartial(); + } else { + connectionId_ = value; + } + onChanged(); + } else { + connectionIdBuilder_.mergeFrom(value); + } + + return this; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + public Builder clearConnectionId() { + if (connectionIdBuilder_ == null) { + connectionId_ = null; + onChanged(); + } else { + connectionId_ = null; + connectionIdBuilder_ = null; + } + + return this; + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + public context.ContextOuterClass.ConnectionId.Builder getConnectionIdBuilder() { + + onChanged(); + return getConnectionIdFieldBuilder().getBuilder(); + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + public context.ContextOuterClass.ConnectionIdOrBuilder getConnectionIdOrBuilder() { + if (connectionIdBuilder_ != null) { + return connectionIdBuilder_.getMessageOrBuilder(); + } else { + return connectionId_ == null ? 
+ context.ContextOuterClass.ConnectionId.getDefaultInstance() : connectionId_; + } + } + /** + * <code>.context.ConnectionId connection_id = 9;</code> + */ + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.ConnectionId, context.ContextOuterClass.ConnectionId.Builder, context.ContextOuterClass.ConnectionIdOrBuilder> + getConnectionIdFieldBuilder() { + if (connectionIdBuilder_ == null) { + connectionIdBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.ConnectionId, context.ContextOuterClass.ConnectionId.Builder, context.ContextOuterClass.ConnectionIdOrBuilder>( + getConnectionId(), + getParentForChildren(), + isClean()); + connectionId_ = null; + } + return connectionIdBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -2869,27 +3072,27 @@ public final class Monitoring { com.google.protobuf.MessageOrBuilder { /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ java.util.List<monitoring.Monitoring.KpiId> - getKpiIdList(); + getKpiIdsList(); /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - monitoring.Monitoring.KpiId getKpiId(int index); + monitoring.Monitoring.KpiId getKpiIds(int index); /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - int getKpiIdCount(); + int getKpiIdsCount(); /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ java.util.List<? extends monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdOrBuilderList(); + getKpiIdsOrBuilderList(); /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder( + monitoring.Monitoring.KpiIdOrBuilder getKpiIdsOrBuilder( int index); /** @@ -2898,18 +3101,12 @@ public final class Monitoring { */ float getMonitoringWindowS(); - /** - * <code>float sampling_rate_s = 3;</code> - * @return The samplingRateS. - */ - float getSamplingRateS(); - /** * <pre> * used when you want something like "get the last N many samples * </pre> * - * <code>uint32 last_n_samples = 4;</code> + * <code>uint32 last_n_samples = 3;</code> * @return The lastNSamples. */ int getLastNSamples(); @@ -2919,7 +3116,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> * @return Whether the startTimestamp field is set. */ boolean hasStartTimestamp(); @@ -2928,7 +3125,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> * @return The startTimestamp. 
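Alongside the kpi_id -> kpi_ids renames visible above, the regenerated Java tracks the new KpiDescriptor.connection_id field (tag 9) added in proto/monitoring.proto. From Python the field is just another optional sub-message; a hypothetical descriptor binding a KPI to a concrete connection (assuming context.ConnectionId carries a connection_uuid like the other id messages):

from context_pb2 import ConnectionId, Uuid
from monitoring_pb2 import KpiDescriptor

# In addition to the endpoint/service/slice references (fields 6..8), a KPI
# can now reference the connection it is measured on.
descriptor = KpiDescriptor(
    connection_id=ConnectionId(connection_uuid=Uuid(uuid="conn-1")))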
*/ context.ContextOuterClass.Timestamp getStartTimestamp(); @@ -2937,7 +3134,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ context.ContextOuterClass.TimestampOrBuilder getStartTimestampOrBuilder(); @@ -2946,7 +3143,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> * @return Whether the endTimestamp field is set. */ boolean hasEndTimestamp(); @@ -2955,7 +3152,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> * @return The endTimestamp. */ context.ContextOuterClass.Timestamp getEndTimestamp(); @@ -2964,7 +3161,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ context.ContextOuterClass.TimestampOrBuilder getEndTimestampOrBuilder(); } @@ -2981,7 +3178,7 @@ public final class Monitoring { super(builder); } private KpiQuery() { - kpiId_ = java.util.Collections.emptyList(); + kpiIds_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -3017,10 +3214,10 @@ public final class Monitoring { break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { - kpiId_ = new java.util.ArrayList<monitoring.Monitoring.KpiId>(); + kpiIds_ = new java.util.ArrayList<monitoring.Monitoring.KpiId>(); mutable_bitField0_ |= 0x00000001; } - kpiId_.add( + kpiIds_.add( input.readMessage(monitoring.Monitoring.KpiId.parser(), extensionRegistry)); break; } @@ -3029,17 +3226,12 @@ public final class Monitoring { monitoringWindowS_ = input.readFloat(); break; } - case 29: { - - samplingRateS_ = input.readFloat(); - break; - } - case 32: { + case 24: { lastNSamples_ = input.readUInt32(); break; } - case 42: { + case 34: { context.ContextOuterClass.Timestamp.Builder subBuilder = null; if (startTimestamp_ != null) { subBuilder = startTimestamp_.toBuilder(); @@ -3052,7 +3244,7 @@ public final class Monitoring { break; } - case 50: { + case 42: { context.ContextOuterClass.Timestamp.Builder subBuilder = null; if (endTimestamp_ != null) { subBuilder = endTimestamp_.toBuilder(); @@ -3081,7 +3273,7 @@ public final class Monitoring { e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { - kpiId_ = java.util.Collections.unmodifiableList(kpiId_); + kpiIds_ = java.util.Collections.unmodifiableList(kpiIds_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -3100,44 +3292,44 @@ public final class Monitoring { monitoring.Monitoring.KpiQuery.class, monitoring.Monitoring.KpiQuery.Builder.class); } - public static final int KPI_ID_FIELD_NUMBER = 1; - private java.util.List<monitoring.Monitoring.KpiId> kpiId_; + public static final int KPI_IDS_FIELD_NUMBER = 1; + private java.util.List<monitoring.Monitoring.KpiId> kpiIds_; /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ @java.lang.Override - public java.util.List<monitoring.Monitoring.KpiId> getKpiIdList() { - return kpiId_; + 
public java.util.List<monitoring.Monitoring.KpiId> getKpiIdsList() { + return kpiIds_; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ @java.lang.Override public java.util.List<? extends monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdOrBuilderList() { - return kpiId_; + getKpiIdsOrBuilderList() { + return kpiIds_; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ @java.lang.Override - public int getKpiIdCount() { - return kpiId_.size(); + public int getKpiIdsCount() { + return kpiIds_.size(); } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.KpiId getKpiId(int index) { - return kpiId_.get(index); + public monitoring.Monitoring.KpiId getKpiIds(int index) { + return kpiIds_.get(index); } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder( + public monitoring.Monitoring.KpiIdOrBuilder getKpiIdsOrBuilder( int index) { - return kpiId_.get(index); + return kpiIds_.get(index); } public static final int MONITORING_WINDOW_S_FIELD_NUMBER = 2; @@ -3151,25 +3343,14 @@ public final class Monitoring { return monitoringWindowS_; } - public static final int SAMPLING_RATE_S_FIELD_NUMBER = 3; - private float samplingRateS_; - /** - * <code>float sampling_rate_s = 3;</code> - * @return The samplingRateS. - */ - @java.lang.Override - public float getSamplingRateS() { - return samplingRateS_; - } - - public static final int LAST_N_SAMPLES_FIELD_NUMBER = 4; + public static final int LAST_N_SAMPLES_FIELD_NUMBER = 3; private int lastNSamples_; /** * <pre> * used when you want something like "get the last N many samples * </pre> * - * <code>uint32 last_n_samples = 4;</code> + * <code>uint32 last_n_samples = 3;</code> * @return The lastNSamples. */ @java.lang.Override @@ -3177,14 +3358,14 @@ public final class Monitoring { return lastNSamples_; } - public static final int START_TIMESTAMP_FIELD_NUMBER = 5; + public static final int START_TIMESTAMP_FIELD_NUMBER = 4; private context.ContextOuterClass.Timestamp startTimestamp_; /** * <pre> * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> * @return Whether the startTimestamp field is set. */ @java.lang.Override @@ -3196,7 +3377,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> * @return The startTimestamp. 
*/ @java.lang.Override @@ -3208,21 +3389,21 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ @java.lang.Override public context.ContextOuterClass.TimestampOrBuilder getStartTimestampOrBuilder() { return getStartTimestamp(); } - public static final int END_TIMESTAMP_FIELD_NUMBER = 6; + public static final int END_TIMESTAMP_FIELD_NUMBER = 5; private context.ContextOuterClass.Timestamp endTimestamp_; /** * <pre> * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> * @return Whether the endTimestamp field is set. */ @java.lang.Override @@ -3234,7 +3415,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> * @return The endTimestamp. */ @java.lang.Override @@ -3246,7 +3427,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ @java.lang.Override public context.ContextOuterClass.TimestampOrBuilder getEndTimestampOrBuilder() { @@ -3267,23 +3448,20 @@ public final class Monitoring { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - for (int i = 0; i < kpiId_.size(); i++) { - output.writeMessage(1, kpiId_.get(i)); + for (int i = 0; i < kpiIds_.size(); i++) { + output.writeMessage(1, kpiIds_.get(i)); } if (monitoringWindowS_ != 0F) { output.writeFloat(2, monitoringWindowS_); } - if (samplingRateS_ != 0F) { - output.writeFloat(3, samplingRateS_); - } if (lastNSamples_ != 0) { - output.writeUInt32(4, lastNSamples_); + output.writeUInt32(3, lastNSamples_); } if (startTimestamp_ != null) { - output.writeMessage(5, getStartTimestamp()); + output.writeMessage(4, getStartTimestamp()); } if (endTimestamp_ != null) { - output.writeMessage(6, getEndTimestamp()); + output.writeMessage(5, getEndTimestamp()); } unknownFields.writeTo(output); } @@ -3294,29 +3472,25 @@ public final class Monitoring { if (size != -1) return size; size = 0; - for (int i = 0; i < kpiId_.size(); i++) { + for (int i = 0; i < kpiIds_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, kpiId_.get(i)); + .computeMessageSize(1, kpiIds_.get(i)); } if (monitoringWindowS_ != 0F) { size += com.google.protobuf.CodedOutputStream .computeFloatSize(2, monitoringWindowS_); } - if (samplingRateS_ != 0F) { - size += com.google.protobuf.CodedOutputStream - .computeFloatSize(3, samplingRateS_); - } if (lastNSamples_ != 0) { size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, lastNSamples_); + .computeUInt32Size(3, lastNSamples_); } if (startTimestamp_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, getStartTimestamp()); + .computeMessageSize(4, getStartTimestamp()); } if (endTimestamp_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(6, getEndTimestamp()); + .computeMessageSize(5, getEndTimestamp()); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -3333,14 +3507,11 @@ public final 
class Monitoring { } monitoring.Monitoring.KpiQuery other = (monitoring.Monitoring.KpiQuery) obj; - if (!getKpiIdList() - .equals(other.getKpiIdList())) return false; + if (!getKpiIdsList() + .equals(other.getKpiIdsList())) return false; if (java.lang.Float.floatToIntBits(getMonitoringWindowS()) != java.lang.Float.floatToIntBits( other.getMonitoringWindowS())) return false; - if (java.lang.Float.floatToIntBits(getSamplingRateS()) - != java.lang.Float.floatToIntBits( - other.getSamplingRateS())) return false; if (getLastNSamples() != other.getLastNSamples()) return false; if (hasStartTimestamp() != other.hasStartTimestamp()) return false; @@ -3364,16 +3535,13 @@ public final class Monitoring { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (getKpiIdCount() > 0) { - hash = (37 * hash) + KPI_ID_FIELD_NUMBER; - hash = (53 * hash) + getKpiIdList().hashCode(); + if (getKpiIdsCount() > 0) { + hash = (37 * hash) + KPI_IDS_FIELD_NUMBER; + hash = (53 * hash) + getKpiIdsList().hashCode(); } hash = (37 * hash) + MONITORING_WINDOW_S_FIELD_NUMBER; hash = (53 * hash) + java.lang.Float.floatToIntBits( getMonitoringWindowS()); - hash = (37 * hash) + SAMPLING_RATE_S_FIELD_NUMBER; - hash = (53 * hash) + java.lang.Float.floatToIntBits( - getSamplingRateS()); hash = (37 * hash) + LAST_N_SAMPLES_FIELD_NUMBER; hash = (53 * hash) + getLastNSamples(); if (hasStartTimestamp()) { @@ -3512,22 +3680,20 @@ public final class Monitoring { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getKpiIdFieldBuilder(); + getKpiIdsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); - if (kpiIdBuilder_ == null) { - kpiId_ = java.util.Collections.emptyList(); + if (kpiIdsBuilder_ == null) { + kpiIds_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - kpiIdBuilder_.clear(); + kpiIdsBuilder_.clear(); } monitoringWindowS_ = 0F; - samplingRateS_ = 0F; - lastNSamples_ = 0; if (startTimestampBuilder_ == null) { @@ -3569,17 +3735,16 @@ public final class Monitoring { public monitoring.Monitoring.KpiQuery buildPartial() { monitoring.Monitoring.KpiQuery result = new monitoring.Monitoring.KpiQuery(this); int from_bitField0_ = bitField0_; - if (kpiIdBuilder_ == null) { + if (kpiIdsBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { - kpiId_ = java.util.Collections.unmodifiableList(kpiId_); + kpiIds_ = java.util.Collections.unmodifiableList(kpiIds_); bitField0_ = (bitField0_ & ~0x00000001); } - result.kpiId_ = kpiId_; + result.kpiIds_ = kpiIds_; } else { - result.kpiId_ = kpiIdBuilder_.build(); + result.kpiIds_ = kpiIdsBuilder_.build(); } result.monitoringWindowS_ = monitoringWindowS_; - result.samplingRateS_ = samplingRateS_; result.lastNSamples_ = lastNSamples_; if (startTimestampBuilder_ == null) { result.startTimestamp_ = startTimestamp_; @@ -3639,38 +3804,35 @@ public final class Monitoring { public Builder mergeFrom(monitoring.Monitoring.KpiQuery other) { if (other == monitoring.Monitoring.KpiQuery.getDefaultInstance()) return this; - if (kpiIdBuilder_ == null) { - if (!other.kpiId_.isEmpty()) { - if (kpiId_.isEmpty()) { - kpiId_ = other.kpiId_; + if (kpiIdsBuilder_ == null) { + if (!other.kpiIds_.isEmpty()) { + if (kpiIds_.isEmpty()) { + kpiIds_ = other.kpiIds_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureKpiIdIsMutable(); - kpiId_.addAll(other.kpiId_); + ensureKpiIdsIsMutable(); + kpiIds_.addAll(other.kpiIds_); } onChanged(); } } else { - if 
(!other.kpiId_.isEmpty()) { - if (kpiIdBuilder_.isEmpty()) { - kpiIdBuilder_.dispose(); - kpiIdBuilder_ = null; - kpiId_ = other.kpiId_; + if (!other.kpiIds_.isEmpty()) { + if (kpiIdsBuilder_.isEmpty()) { + kpiIdsBuilder_.dispose(); + kpiIdsBuilder_ = null; + kpiIds_ = other.kpiIds_; bitField0_ = (bitField0_ & ~0x00000001); - kpiIdBuilder_ = + kpiIdsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getKpiIdFieldBuilder() : null; + getKpiIdsFieldBuilder() : null; } else { - kpiIdBuilder_.addAllMessages(other.kpiId_); + kpiIdsBuilder_.addAllMessages(other.kpiIds_); } } } if (other.getMonitoringWindowS() != 0F) { setMonitoringWindowS(other.getMonitoringWindowS()); } - if (other.getSamplingRateS() != 0F) { - setSamplingRateS(other.getSamplingRateS()); - } if (other.getLastNSamples() != 0) { setLastNSamples(other.getLastNSamples()); } @@ -3710,244 +3872,244 @@ public final class Monitoring { } private int bitField0_; - private java.util.List<monitoring.Monitoring.KpiId> kpiId_ = + private java.util.List<monitoring.Monitoring.KpiId> kpiIds_ = java.util.Collections.emptyList(); - private void ensureKpiIdIsMutable() { + private void ensureKpiIdsIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { - kpiId_ = new java.util.ArrayList<monitoring.Monitoring.KpiId>(kpiId_); + kpiIds_ = new java.util.ArrayList<monitoring.Monitoring.KpiId>(kpiIds_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilderV3< - monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> kpiIdBuilder_; + monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> kpiIdsBuilder_; /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public java.util.List<monitoring.Monitoring.KpiId> getKpiIdList() { - if (kpiIdBuilder_ == null) { - return java.util.Collections.unmodifiableList(kpiId_); + public java.util.List<monitoring.Monitoring.KpiId> getKpiIdsList() { + if (kpiIdsBuilder_ == null) { + return java.util.Collections.unmodifiableList(kpiIds_); } else { - return kpiIdBuilder_.getMessageList(); + return kpiIdsBuilder_.getMessageList(); } } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public int getKpiIdCount() { - if (kpiIdBuilder_ == null) { - return kpiId_.size(); + public int getKpiIdsCount() { + if (kpiIdsBuilder_ == null) { + return kpiIds_.size(); } else { - return kpiIdBuilder_.getCount(); + return kpiIdsBuilder_.getCount(); } } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public monitoring.Monitoring.KpiId getKpiId(int index) { - if (kpiIdBuilder_ == null) { - return kpiId_.get(index); + public monitoring.Monitoring.KpiId getKpiIds(int index) { + if (kpiIdsBuilder_ == null) { + return kpiIds_.get(index); } else { - return kpiIdBuilder_.getMessage(index); + return kpiIdsBuilder_.getMessage(index); } } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder setKpiId( + public Builder setKpiIds( int index, monitoring.Monitoring.KpiId value) { - if (kpiIdBuilder_ == null) { + if (kpiIdsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiIdIsMutable(); - kpiId_.set(index, value); + ensureKpiIdsIsMutable(); + kpiIds_.set(index, value); onChanged(); } else 
{ - kpiIdBuilder_.setMessage(index, value); + kpiIdsBuilder_.setMessage(index, value); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder setKpiId( + public Builder setKpiIds( int index, monitoring.Monitoring.KpiId.Builder builderForValue) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.set(index, builderForValue.build()); + if (kpiIdsBuilder_ == null) { + ensureKpiIdsIsMutable(); + kpiIds_.set(index, builderForValue.build()); onChanged(); } else { - kpiIdBuilder_.setMessage(index, builderForValue.build()); + kpiIdsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder addKpiId(monitoring.Monitoring.KpiId value) { - if (kpiIdBuilder_ == null) { + public Builder addKpiIds(monitoring.Monitoring.KpiId value) { + if (kpiIdsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiIdIsMutable(); - kpiId_.add(value); + ensureKpiIdsIsMutable(); + kpiIds_.add(value); onChanged(); } else { - kpiIdBuilder_.addMessage(value); + kpiIdsBuilder_.addMessage(value); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder addKpiId( + public Builder addKpiIds( int index, monitoring.Monitoring.KpiId value) { - if (kpiIdBuilder_ == null) { + if (kpiIdsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiIdIsMutable(); - kpiId_.add(index, value); + ensureKpiIdsIsMutable(); + kpiIds_.add(index, value); onChanged(); } else { - kpiIdBuilder_.addMessage(index, value); + kpiIdsBuilder_.addMessage(index, value); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder addKpiId( + public Builder addKpiIds( monitoring.Monitoring.KpiId.Builder builderForValue) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.add(builderForValue.build()); + if (kpiIdsBuilder_ == null) { + ensureKpiIdsIsMutable(); + kpiIds_.add(builderForValue.build()); onChanged(); } else { - kpiIdBuilder_.addMessage(builderForValue.build()); + kpiIdsBuilder_.addMessage(builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder addKpiId( + public Builder addKpiIds( int index, monitoring.Monitoring.KpiId.Builder builderForValue) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.add(index, builderForValue.build()); + if (kpiIdsBuilder_ == null) { + ensureKpiIdsIsMutable(); + kpiIds_.add(index, builderForValue.build()); onChanged(); } else { - kpiIdBuilder_.addMessage(index, builderForValue.build()); + kpiIdsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder addAllKpiId( + public Builder addAllKpiIds( java.lang.Iterable<? 
extends monitoring.Monitoring.KpiId> values) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); + if (kpiIdsBuilder_ == null) { + ensureKpiIdsIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, kpiId_); + values, kpiIds_); onChanged(); } else { - kpiIdBuilder_.addAllMessages(values); + kpiIdsBuilder_.addAllMessages(values); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder clearKpiId() { - if (kpiIdBuilder_ == null) { - kpiId_ = java.util.Collections.emptyList(); + public Builder clearKpiIds() { + if (kpiIdsBuilder_ == null) { + kpiIds_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - kpiIdBuilder_.clear(); + kpiIdsBuilder_.clear(); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public Builder removeKpiId(int index) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.remove(index); + public Builder removeKpiIds(int index) { + if (kpiIdsBuilder_ == null) { + ensureKpiIdsIsMutable(); + kpiIds_.remove(index); onChanged(); } else { - kpiIdBuilder_.remove(index); + kpiIdsBuilder_.remove(index); } return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public monitoring.Monitoring.KpiId.Builder getKpiIdBuilder( + public monitoring.Monitoring.KpiId.Builder getKpiIdsBuilder( int index) { - return getKpiIdFieldBuilder().getBuilder(index); + return getKpiIdsFieldBuilder().getBuilder(index); } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder( + public monitoring.Monitoring.KpiIdOrBuilder getKpiIdsOrBuilder( int index) { - if (kpiIdBuilder_ == null) { - return kpiId_.get(index); } else { - return kpiIdBuilder_.getMessageOrBuilder(index); + if (kpiIdsBuilder_ == null) { + return kpiIds_.get(index); } else { + return kpiIdsBuilder_.getMessageOrBuilder(index); } } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ public java.util.List<? 
extends monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdOrBuilderList() { - if (kpiIdBuilder_ != null) { - return kpiIdBuilder_.getMessageOrBuilderList(); + getKpiIdsOrBuilderList() { + if (kpiIdsBuilder_ != null) { + return kpiIdsBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(kpiId_); + return java.util.Collections.unmodifiableList(kpiIds_); } } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public monitoring.Monitoring.KpiId.Builder addKpiIdBuilder() { - return getKpiIdFieldBuilder().addBuilder( + public monitoring.Monitoring.KpiId.Builder addKpiIdsBuilder() { + return getKpiIdsFieldBuilder().addBuilder( monitoring.Monitoring.KpiId.getDefaultInstance()); } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ - public monitoring.Monitoring.KpiId.Builder addKpiIdBuilder( + public monitoring.Monitoring.KpiId.Builder addKpiIdsBuilder( int index) { - return getKpiIdFieldBuilder().addBuilder( + return getKpiIdsFieldBuilder().addBuilder( index, monitoring.Monitoring.KpiId.getDefaultInstance()); } /** - * <code>repeated .monitoring.KpiId kpi_id = 1;</code> + * <code>repeated .monitoring.KpiId kpi_ids = 1;</code> */ public java.util.List<monitoring.Monitoring.KpiId.Builder> - getKpiIdBuilderList() { - return getKpiIdFieldBuilder().getBuilderList(); + getKpiIdsBuilderList() { + return getKpiIdsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdFieldBuilder() { - if (kpiIdBuilder_ == null) { - kpiIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + getKpiIdsFieldBuilder() { + if (kpiIdsBuilder_ == null) { + kpiIdsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder>( - kpiId_, + kpiIds_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); - kpiId_ = null; + kpiIds_ = null; } - return kpiIdBuilder_; + return kpiIdsBuilder_; } private float monitoringWindowS_ ; @@ -3981,44 +4143,13 @@ public final class Monitoring { return this; } - private float samplingRateS_ ; - /** - * <code>float sampling_rate_s = 3;</code> - * @return The samplingRateS. - */ - @java.lang.Override - public float getSamplingRateS() { - return samplingRateS_; - } - /** - * <code>float sampling_rate_s = 3;</code> - * @param value The samplingRateS to set. - * @return This builder for chaining. - */ - public Builder setSamplingRateS(float value) { - - samplingRateS_ = value; - onChanged(); - return this; - } - /** - * <code>float sampling_rate_s = 3;</code> - * @return This builder for chaining. - */ - public Builder clearSamplingRateS() { - - samplingRateS_ = 0F; - onChanged(); - return this; - } - private int lastNSamples_ ; /** * <pre> * used when you want something like "get the last N many samples * </pre> * - * <code>uint32 last_n_samples = 4;</code> + * <code>uint32 last_n_samples = 3;</code> * @return The lastNSamples. */ @java.lang.Override @@ -4030,7 +4161,7 @@ public final class Monitoring { * used when you want something like "get the last N many samples * </pre> * - * <code>uint32 last_n_samples = 4;</code> + * <code>uint32 last_n_samples = 3;</code> * @param value The lastNSamples to set. * @return This builder for chaining. 
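The renamed kpi_ids plumbing above follows the standard generated pattern for a repeated message field: mutators either touch an inline ArrayList guarded by ensureKpiIdsIsMutable(), or, once getKpiIdsFieldBuilder() has been called, delegate to a RepeatedFieldBuilderV3 that takes ownership (kpiIds_ is nulled). A caller-side sketch of that switch, assuming the regenerated classes above and that context.Uuid carries a string uuid field as elsewhere in this repo:

    public final class KpiIdsBuilderDemo {
        public static void main(String[] args) {
            monitoring.Monitoring.KpiQuery.Builder qb = monitoring.Monitoring.KpiQuery.newBuilder();
            // Inline-list path: no RepeatedFieldBuilderV3 exists yet.
            qb.addKpiIds(monitoring.Monitoring.KpiId.getDefaultInstance());
            // Requesting a nested builder flips the field to the field-builder
            // path; edits made through it propagate back into qb.
            qb.getKpiIdsBuilder(0).getKpiIdBuilder().setUuid("kpi-42");
            System.out.println(qb.getKpiIds(0).getKpiId().getUuid()); // kpi-42
        }
    }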
*/ @@ -4045,7 +4176,7 @@ public final class Monitoring { * used when you want something like "get the last N many samples * </pre> * - * <code>uint32 last_n_samples = 4;</code> + * <code>uint32 last_n_samples = 3;</code> * @return This builder for chaining. */ public Builder clearLastNSamples() { @@ -4063,7 +4194,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> * @return Whether the startTimestamp field is set. */ public boolean hasStartTimestamp() { @@ -4074,7 +4205,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> * @return The startTimestamp. */ public context.ContextOuterClass.Timestamp getStartTimestamp() { @@ -4089,7 +4220,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ public Builder setStartTimestamp(context.ContextOuterClass.Timestamp value) { if (startTimestampBuilder_ == null) { @@ -4109,7 +4240,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ public Builder setStartTimestamp( context.ContextOuterClass.Timestamp.Builder builderForValue) { @@ -4127,7 +4258,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ public Builder mergeStartTimestamp(context.ContextOuterClass.Timestamp value) { if (startTimestampBuilder_ == null) { @@ -4149,7 +4280,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ public Builder clearStartTimestamp() { if (startTimestampBuilder_ == null) { @@ -4167,7 +4298,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ public context.ContextOuterClass.Timestamp.Builder getStartTimestampBuilder() { @@ -4179,7 +4310,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ public context.ContextOuterClass.TimestampOrBuilder getStartTimestampOrBuilder() { if (startTimestampBuilder_ != null) { @@ -4194,7 +4325,7 @@ public final class Monitoring { * used when you want something like "get the samples since X date/time" * </pre> * - * <code>.context.Timestamp start_timestamp = 5;</code> + * <code>.context.Timestamp start_timestamp = 4;</code> */ private com.google.protobuf.SingleFieldBuilderV3< context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder> 
@@ -4218,7 +4349,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> * @return Whether the endTimestamp field is set. */ public boolean hasEndTimestamp() { @@ -4229,7 +4360,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> * @return The endTimestamp. */ public context.ContextOuterClass.Timestamp getEndTimestamp() { @@ -4244,7 +4375,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ public Builder setEndTimestamp(context.ContextOuterClass.Timestamp value) { if (endTimestampBuilder_ == null) { @@ -4264,7 +4395,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ public Builder setEndTimestamp( context.ContextOuterClass.Timestamp.Builder builderForValue) { @@ -4282,7 +4413,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ public Builder mergeEndTimestamp(context.ContextOuterClass.Timestamp value) { if (endTimestampBuilder_ == null) { @@ -4304,7 +4435,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ public Builder clearEndTimestamp() { if (endTimestampBuilder_ == null) { @@ -4322,7 +4453,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ public context.ContextOuterClass.Timestamp.Builder getEndTimestampBuilder() { @@ -4334,7 +4465,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ public context.ContextOuterClass.TimestampOrBuilder getEndTimestampOrBuilder() { if (endTimestampBuilder_ != null) { @@ -4349,7 +4480,7 @@ public final class Monitoring { * used when you want something like "get the samples until X date/time" * </pre> * - * <code>.context.Timestamp end_timestamp = 6;</code> + * <code>.context.Timestamp end_timestamp = 5;</code> */ private com.google.protobuf.SingleFieldBuilderV3< context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder> @@ -4417,22 +4548,2659 @@ public final class Monitoring { } - public interface KpiIdOrBuilder extends - // @@protoc_insertion_point(interface_extends:monitoring.KpiId) + public interface RawKpiOrBuilder extends + // @@protoc_insertion_point(interface_extends:monitoring.RawKpi) com.google.protobuf.MessageOrBuilder { /** - * <code>.context.Uuid kpi_id = 
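Taken together, the KpiQuery changes are source-incompatible for Java callers: every accessor moves from the singular KpiId form to KpiIds, and the samplingRateS accessors disappear along with the field. A hypothetical caller migration (values are illustrative):

    public final class KpiQueryMigrationDemo {
        public static void main(String[] args) {
            monitoring.Monitoring.KpiId someKpiId = monitoring.Monitoring.KpiId.getDefaultInstance();
            monitoring.Monitoring.KpiQuery query = monitoring.Monitoring.KpiQuery.newBuilder()
                .addKpiIds(someKpiId)          // was: addKpiId(someKpiId)
                .setMonitoringWindowS(60.0f)   // unchanged (field 2)
                // was: .setSamplingRateS(1.0f) -- removed with the field
                .setLastNSamples(100)          // same method name, now field 3
                .build();
            System.out.println(query.getKpiIdsCount()); // 1
        }
    }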
1;</code> - * @return Whether the kpiId field is set. + * <code>.context.Timestamp timestamp = 1;</code> + * @return Whether the timestamp field is set. */ - boolean hasKpiId(); + boolean hasTimestamp(); /** - * <code>.context.Uuid kpi_id = 1;</code> - * @return The kpiId. + * <code>.context.Timestamp timestamp = 1;</code> + * @return The timestamp. */ - context.ContextOuterClass.Uuid getKpiId(); + context.ContextOuterClass.Timestamp getTimestamp(); /** - * <code>.context.Uuid kpi_id = 1;</code> + * <code>.context.Timestamp timestamp = 1;</code> + */ + context.ContextOuterClass.TimestampOrBuilder getTimestampOrBuilder(); + + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + * @return Whether the kpiValue field is set. + */ + boolean hasKpiValue(); + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + * @return The kpiValue. + */ + monitoring.Monitoring.KpiValue getKpiValue(); + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + monitoring.Monitoring.KpiValueOrBuilder getKpiValueOrBuilder(); + } + /** + * <pre> + * cell + * </pre> + * + * Protobuf type {@code monitoring.RawKpi} + */ + public static final class RawKpi extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:monitoring.RawKpi) + RawKpiOrBuilder { + private static final long serialVersionUID = 0L; + // Use RawKpi.newBuilder() to construct. + private RawKpi(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { + super(builder); + } + private RawKpi() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RawKpi(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RawKpi( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + context.ContextOuterClass.Timestamp.Builder subBuilder = null; + if (timestamp_ != null) { + subBuilder = timestamp_.toBuilder(); + } + timestamp_ = input.readMessage(context.ContextOuterClass.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(timestamp_); + timestamp_ = subBuilder.buildPartial(); + } + + break; + } + case 18: { + monitoring.Monitoring.KpiValue.Builder subBuilder = null; + if (kpiValue_ != null) { + subBuilder = kpiValue_.toBuilder(); + } + kpiValue_ = input.readMessage(monitoring.Monitoring.KpiValue.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(kpiValue_); + kpiValue_ = subBuilder.buildPartial(); + } + + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public 
static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return monitoring.Monitoring.internal_static_monitoring_RawKpi_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return monitoring.Monitoring.internal_static_monitoring_RawKpi_fieldAccessorTable + .ensureFieldAccessorsInitialized( + monitoring.Monitoring.RawKpi.class, monitoring.Monitoring.RawKpi.Builder.class); + } + + public static final int TIMESTAMP_FIELD_NUMBER = 1; + private context.ContextOuterClass.Timestamp timestamp_; + /** + * <code>.context.Timestamp timestamp = 1;</code> + * @return Whether the timestamp field is set. + */ + @java.lang.Override + public boolean hasTimestamp() { + return timestamp_ != null; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + * @return The timestamp. + */ + @java.lang.Override + public context.ContextOuterClass.Timestamp getTimestamp() { + return timestamp_ == null ? context.ContextOuterClass.Timestamp.getDefaultInstance() : timestamp_; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + @java.lang.Override + public context.ContextOuterClass.TimestampOrBuilder getTimestampOrBuilder() { + return getTimestamp(); + } + + public static final int KPI_VALUE_FIELD_NUMBER = 2; + private monitoring.Monitoring.KpiValue kpiValue_; + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + * @return Whether the kpiValue field is set. + */ + @java.lang.Override + public boolean hasKpiValue() { + return kpiValue_ != null; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + * @return The kpiValue. + */ + @java.lang.Override + public monitoring.Monitoring.KpiValue getKpiValue() { + return kpiValue_ == null ? 
monitoring.Monitoring.KpiValue.getDefaultInstance() : kpiValue_; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + @java.lang.Override + public monitoring.Monitoring.KpiValueOrBuilder getKpiValueOrBuilder() { + return getKpiValue(); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (timestamp_ != null) { + output.writeMessage(1, getTimestamp()); + } + if (kpiValue_ != null) { + output.writeMessage(2, getKpiValue()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (timestamp_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getTimestamp()); + } + if (kpiValue_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getKpiValue()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof monitoring.Monitoring.RawKpi)) { + return super.equals(obj); + } + monitoring.Monitoring.RawKpi other = (monitoring.Monitoring.RawKpi) obj; + + if (hasTimestamp() != other.hasTimestamp()) return false; + if (hasTimestamp()) { + if (!getTimestamp() + .equals(other.getTimestamp())) return false; + } + if (hasKpiValue() != other.hasKpiValue()) return false; + if (hasKpiValue()) { + if (!getKpiValue() + .equals(other.getKpiValue())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getTimestamp().hashCode(); + } + if (hasKpiValue()) { + hash = (37 * hash) + KPI_VALUE_FIELD_NUMBER; + hash = (53 * hash) + getKpiValue().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static monitoring.Monitoring.RawKpi parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpi parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpi parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpi parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpi parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { 
+ return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpi parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpi parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpi parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static monitoring.Monitoring.RawKpi parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpi parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static monitoring.Monitoring.RawKpi parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpi parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(monitoring.Monitoring.RawKpi prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
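The new RawKpi message ("cell") pairs one context.Timestamp with one monitoring.KpiValue, and the parseFrom overloads above give it the full set of entry points generated for every message in this file. A round-trip sketch, assuming context.Timestamp exposes a double timestamp field and KpiValue a float variant (setFloatVal) as elsewhere in this repo:

    public final class RawKpiRoundTrip {
        public static void main(String[] args) throws Exception {
            monitoring.Monitoring.RawKpi cell = monitoring.Monitoring.RawKpi.newBuilder()
                .setTimestamp(context.ContextOuterClass.Timestamp.newBuilder()
                    .setTimestamp(1660000000.0))          // field 1, wire tag 10
                .setKpiValue(monitoring.Monitoring.KpiValue.newBuilder()
                    .setFloatVal(0.75f))                  // field 2, wire tag 18
                .build();
            monitoring.Monitoring.RawKpi parsed =
                monitoring.Monitoring.RawKpi.parseFrom(cell.toByteArray());
            System.out.println(parsed.equals(cell));      // true
        }
    }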
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * <pre> + * cell + * </pre> + * + * Protobuf type {@code monitoring.RawKpi} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements + // @@protoc_insertion_point(builder_implements:monitoring.RawKpi) + monitoring.Monitoring.RawKpiOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return monitoring.Monitoring.internal_static_monitoring_RawKpi_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return monitoring.Monitoring.internal_static_monitoring_RawKpi_fieldAccessorTable + .ensureFieldAccessorsInitialized( + monitoring.Monitoring.RawKpi.class, monitoring.Monitoring.RawKpi.Builder.class); + } + + // Construct using monitoring.Monitoring.RawKpi.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (timestampBuilder_ == null) { + timestamp_ = null; + } else { + timestamp_ = null; + timestampBuilder_ = null; + } + if (kpiValueBuilder_ == null) { + kpiValue_ = null; + } else { + kpiValue_ = null; + kpiValueBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return monitoring.Monitoring.internal_static_monitoring_RawKpi_descriptor; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpi getDefaultInstanceForType() { + return monitoring.Monitoring.RawKpi.getDefaultInstance(); + } + + @java.lang.Override + public monitoring.Monitoring.RawKpi build() { + monitoring.Monitoring.RawKpi result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpi buildPartial() { + monitoring.Monitoring.RawKpi result = new monitoring.Monitoring.RawKpi(this); + if (timestampBuilder_ == null) { + result.timestamp_ = timestamp_; + } else { + result.timestamp_ = timestampBuilder_.build(); + } + if (kpiValueBuilder_ == null) { + result.kpiValue_ = kpiValue_; + } else { + result.kpiValue_ = kpiValueBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof monitoring.Monitoring.RawKpi) { + return mergeFrom((monitoring.Monitoring.RawKpi)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(monitoring.Monitoring.RawKpi other) { + if (other == monitoring.Monitoring.RawKpi.getDefaultInstance()) return this; + if (other.hasTimestamp()) { + mergeTimestamp(other.getTimestamp()); + } + if (other.hasKpiValue()) { + mergeKpiValue(other.getKpiValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + monitoring.Monitoring.RawKpi parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (monitoring.Monitoring.RawKpi) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private context.ContextOuterClass.Timestamp timestamp_; + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder> timestampBuilder_; + /** + * <code>.context.Timestamp timestamp = 1;</code> + * @return Whether the timestamp field is set. + */ + public boolean hasTimestamp() { + return timestampBuilder_ != null || timestamp_ != null; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + * @return The timestamp. + */ + public context.ContextOuterClass.Timestamp getTimestamp() { + if (timestampBuilder_ == null) { + return timestamp_ == null ? 
context.ContextOuterClass.Timestamp.getDefaultInstance() : timestamp_; + } else { + return timestampBuilder_.getMessage(); + } + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + public Builder setTimestamp(context.ContextOuterClass.Timestamp value) { + if (timestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestamp_ = value; + onChanged(); + } else { + timestampBuilder_.setMessage(value); + } + + return this; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + public Builder setTimestamp( + context.ContextOuterClass.Timestamp.Builder builderForValue) { + if (timestampBuilder_ == null) { + timestamp_ = builderForValue.build(); + onChanged(); + } else { + timestampBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + public Builder mergeTimestamp(context.ContextOuterClass.Timestamp value) { + if (timestampBuilder_ == null) { + if (timestamp_ != null) { + timestamp_ = + context.ContextOuterClass.Timestamp.newBuilder(timestamp_).mergeFrom(value).buildPartial(); + } else { + timestamp_ = value; + } + onChanged(); + } else { + timestampBuilder_.mergeFrom(value); + } + + return this; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + public Builder clearTimestamp() { + if (timestampBuilder_ == null) { + timestamp_ = null; + onChanged(); + } else { + timestamp_ = null; + timestampBuilder_ = null; + } + + return this; + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + public context.ContextOuterClass.Timestamp.Builder getTimestampBuilder() { + + onChanged(); + return getTimestampFieldBuilder().getBuilder(); + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + public context.ContextOuterClass.TimestampOrBuilder getTimestampOrBuilder() { + if (timestampBuilder_ != null) { + return timestampBuilder_.getMessageOrBuilder(); + } else { + return timestamp_ == null ? + context.ContextOuterClass.Timestamp.getDefaultInstance() : timestamp_; + } + } + /** + * <code>.context.Timestamp timestamp = 1;</code> + */ + private com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder> + getTimestampFieldBuilder() { + if (timestampBuilder_ == null) { + timestampBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder>( + getTimestamp(), + getParentForChildren(), + isClean()); + timestamp_ = null; + } + return timestampBuilder_; + } + + private monitoring.Monitoring.KpiValue kpiValue_; + private com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiValue, monitoring.Monitoring.KpiValue.Builder, monitoring.Monitoring.KpiValueOrBuilder> kpiValueBuilder_; + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + * @return Whether the kpiValue field is set. + */ + public boolean hasKpiValue() { + return kpiValueBuilder_ != null || kpiValue_ != null; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + * @return The kpiValue. + */ + public monitoring.Monitoring.KpiValue getKpiValue() { + if (kpiValueBuilder_ == null) { + return kpiValue_ == null ? 
monitoring.Monitoring.KpiValue.getDefaultInstance() : kpiValue_; + } else { + return kpiValueBuilder_.getMessage(); + } + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + public Builder setKpiValue(monitoring.Monitoring.KpiValue value) { + if (kpiValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + kpiValue_ = value; + onChanged(); + } else { + kpiValueBuilder_.setMessage(value); + } + + return this; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + public Builder setKpiValue( + monitoring.Monitoring.KpiValue.Builder builderForValue) { + if (kpiValueBuilder_ == null) { + kpiValue_ = builderForValue.build(); + onChanged(); + } else { + kpiValueBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + public Builder mergeKpiValue(monitoring.Monitoring.KpiValue value) { + if (kpiValueBuilder_ == null) { + if (kpiValue_ != null) { + kpiValue_ = + monitoring.Monitoring.KpiValue.newBuilder(kpiValue_).mergeFrom(value).buildPartial(); + } else { + kpiValue_ = value; + } + onChanged(); + } else { + kpiValueBuilder_.mergeFrom(value); + } + + return this; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + public Builder clearKpiValue() { + if (kpiValueBuilder_ == null) { + kpiValue_ = null; + onChanged(); + } else { + kpiValue_ = null; + kpiValueBuilder_ = null; + } + + return this; + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + public monitoring.Monitoring.KpiValue.Builder getKpiValueBuilder() { + + onChanged(); + return getKpiValueFieldBuilder().getBuilder(); + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + public monitoring.Monitoring.KpiValueOrBuilder getKpiValueOrBuilder() { + if (kpiValueBuilder_ != null) { + return kpiValueBuilder_.getMessageOrBuilder(); + } else { + return kpiValue_ == null ? 
+ monitoring.Monitoring.KpiValue.getDefaultInstance() : kpiValue_; + } + } + /** + * <code>.monitoring.KpiValue kpi_value = 2;</code> + */ + private com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiValue, monitoring.Monitoring.KpiValue.Builder, monitoring.Monitoring.KpiValueOrBuilder> + getKpiValueFieldBuilder() { + if (kpiValueBuilder_ == null) { + kpiValueBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiValue, monitoring.Monitoring.KpiValue.Builder, monitoring.Monitoring.KpiValueOrBuilder>( + getKpiValue(), + getParentForChildren(), + isClean()); + kpiValue_ = null; + } + return kpiValueBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:monitoring.RawKpi) + } + + // @@protoc_insertion_point(class_scope:monitoring.RawKpi) + private static final monitoring.Monitoring.RawKpi DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new monitoring.Monitoring.RawKpi(); + } + + public static monitoring.Monitoring.RawKpi getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser<RawKpi> + PARSER = new com.google.protobuf.AbstractParser<RawKpi>() { + @java.lang.Override + public RawKpi parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RawKpi(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser<RawKpi> parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser<RawKpi> getParserForType() { + return PARSER; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpi getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RawKpiListOrBuilder extends + // @@protoc_insertion_point(interface_extends:monitoring.RawKpiList) + com.google.protobuf.MessageOrBuilder { + + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + * @return Whether the kpiId field is set. + */ + boolean hasKpiId(); + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + * @return The kpiId. + */ + monitoring.Monitoring.KpiId getKpiId(); + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + */ + monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder(); + + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + java.util.List<monitoring.Monitoring.RawKpi> + getRawKpisList(); + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + monitoring.Monitoring.RawKpi getRawKpis(int index); + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + int getRawKpisCount(); + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + java.util.List<? 
extends monitoring.Monitoring.RawKpiOrBuilder> + getRawKpisOrBuilderList(); + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + monitoring.Monitoring.RawKpiOrBuilder getRawKpisOrBuilder( + int index); + } + /** + * <pre> + * column + * </pre> + * + * Protobuf type {@code monitoring.RawKpiList} + */ + public static final class RawKpiList extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:monitoring.RawKpiList) + RawKpiListOrBuilder { + private static final long serialVersionUID = 0L; + // Use RawKpiList.newBuilder() to construct. + private RawKpiList(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { + super(builder); + } + private RawKpiList() { + rawKpis_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RawKpiList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RawKpiList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + monitoring.Monitoring.KpiId.Builder subBuilder = null; + if (kpiId_ != null) { + subBuilder = kpiId_.toBuilder(); + } + kpiId_ = input.readMessage(monitoring.Monitoring.KpiId.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(kpiId_); + kpiId_ = subBuilder.buildPartial(); + } + + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + rawKpis_ = new java.util.ArrayList<monitoring.Monitoring.RawKpi>(); + mutable_bitField0_ |= 0x00000001; + } + rawKpis_.add( + input.readMessage(monitoring.Monitoring.RawKpi.parser(), extensionRegistry)); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + rawKpis_ = java.util.Collections.unmodifiableList(rawKpis_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + monitoring.Monitoring.RawKpiList.class, monitoring.Monitoring.RawKpiList.Builder.class); + } + + public static final int KPI_ID_FIELD_NUMBER = 1; + private monitoring.Monitoring.KpiId kpiId_; + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + * @return Whether the kpiId 
field is set. + */ + @java.lang.Override + public boolean hasKpiId() { + return kpiId_ != null; + } + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + * @return The kpiId. + */ + @java.lang.Override + public monitoring.Monitoring.KpiId getKpiId() { + return kpiId_ == null ? monitoring.Monitoring.KpiId.getDefaultInstance() : kpiId_; + } + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + */ + @java.lang.Override + public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder() { + return getKpiId(); + } + + public static final int RAW_KPIS_FIELD_NUMBER = 2; + private java.util.List<monitoring.Monitoring.RawKpi> rawKpis_; + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + @java.lang.Override + public java.util.List<monitoring.Monitoring.RawKpi> getRawKpisList() { + return rawKpis_; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + @java.lang.Override + public java.util.List<? extends monitoring.Monitoring.RawKpiOrBuilder> + getRawKpisOrBuilderList() { + return rawKpis_; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + @java.lang.Override + public int getRawKpisCount() { + return rawKpis_.size(); + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + @java.lang.Override + public monitoring.Monitoring.RawKpi getRawKpis(int index) { + return rawKpis_.get(index); + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + @java.lang.Override + public monitoring.Monitoring.RawKpiOrBuilder getRawKpisOrBuilder( + int index) { + return rawKpis_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (kpiId_ != null) { + output.writeMessage(1, getKpiId()); + } + for (int i = 0; i < rawKpis_.size(); i++) { + output.writeMessage(2, rawKpis_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (kpiId_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getKpiId()); + } + for (int i = 0; i < rawKpis_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, rawKpis_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof monitoring.Monitoring.RawKpiList)) { + return super.equals(obj); + } + monitoring.Monitoring.RawKpiList other = (monitoring.Monitoring.RawKpiList) obj; + + if (hasKpiId() != other.hasKpiId()) return false; + if (hasKpiId()) { + if (!getKpiId() + .equals(other.getKpiId())) return false; + } + if (!getRawKpisList() + .equals(other.getRawKpisList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasKpiId()) { + hash = (37 * hash) + KPI_ID_FIELD_NUMBER; + hash = (53 * hash) + getKpiId().hashCode(); + } + if 
(getRawKpisCount() > 0) { + hash = (37 * hash) + RAW_KPIS_FIELD_NUMBER; + hash = (53 * hash) + getRawKpisList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static monitoring.Monitoring.RawKpiList parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpiList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpiList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(monitoring.Monitoring.RawKpiList prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == 
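// ----------------------------------------------------------------------------------
// Illustrative sketch (commentary, not part of the generated diff): the parseFrom
// overloads above support the usual protobuf round-trip; toByteArray() is inherited
// from the protobuf runtime, everything else is generated in this file.
static monitoring.Monitoring.RawKpiList roundTrip(monitoring.Monitoring.RawKpiList original)
    throws com.google.protobuf.InvalidProtocolBufferException {
  byte[] wire = original.toByteArray();  // driven by getSerializedSize()/writeTo()
  monitoring.Monitoring.RawKpiList copy = monitoring.Monitoring.RawKpiList.parseFrom(wire);
  assert copy.equals(original);          // generated equals() compares field values
  return copy;
}
// ----------------------------------------------------------------------------------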
DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * <pre> + * column + * </pre> + * + * Protobuf type {@code monitoring.RawKpiList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements + // @@protoc_insertion_point(builder_implements:monitoring.RawKpiList) + monitoring.Monitoring.RawKpiListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiList_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + monitoring.Monitoring.RawKpiList.class, monitoring.Monitoring.RawKpiList.Builder.class); + } + + // Construct using monitoring.Monitoring.RawKpiList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRawKpisFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (kpiIdBuilder_ == null) { + kpiId_ = null; + } else { + kpiId_ = null; + kpiIdBuilder_ = null; + } + if (rawKpisBuilder_ == null) { + rawKpis_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + rawKpisBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiList_descriptor; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiList getDefaultInstanceForType() { + return monitoring.Monitoring.RawKpiList.getDefaultInstance(); + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiList build() { + monitoring.Monitoring.RawKpiList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiList buildPartial() { + monitoring.Monitoring.RawKpiList result = new monitoring.Monitoring.RawKpiList(this); + int from_bitField0_ = bitField0_; + if (kpiIdBuilder_ == null) { + result.kpiId_ = kpiId_; + } else { + result.kpiId_ = kpiIdBuilder_.build(); + } + if (rawKpisBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + rawKpis_ = java.util.Collections.unmodifiableList(rawKpis_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.rawKpis_ = rawKpis_; + } else { + result.rawKpis_ = rawKpisBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + 
public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof monitoring.Monitoring.RawKpiList) { + return mergeFrom((monitoring.Monitoring.RawKpiList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(monitoring.Monitoring.RawKpiList other) { + if (other == monitoring.Monitoring.RawKpiList.getDefaultInstance()) return this; + if (other.hasKpiId()) { + mergeKpiId(other.getKpiId()); + } + if (rawKpisBuilder_ == null) { + if (!other.rawKpis_.isEmpty()) { + if (rawKpis_.isEmpty()) { + rawKpis_ = other.rawKpis_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRawKpisIsMutable(); + rawKpis_.addAll(other.rawKpis_); + } + onChanged(); + } + } else { + if (!other.rawKpis_.isEmpty()) { + if (rawKpisBuilder_.isEmpty()) { + rawKpisBuilder_.dispose(); + rawKpisBuilder_ = null; + rawKpis_ = other.rawKpis_; + bitField0_ = (bitField0_ & ~0x00000001); + rawKpisBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getRawKpisFieldBuilder() : null; + } else { + rawKpisBuilder_.addAllMessages(other.rawKpis_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + monitoring.Monitoring.RawKpiList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (monitoring.Monitoring.RawKpiList) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private monitoring.Monitoring.KpiId kpiId_; + private com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> kpiIdBuilder_; + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + * @return Whether the kpiId field is set. + */ + public boolean hasKpiId() { + return kpiIdBuilder_ != null || kpiId_ != null; + } + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + * @return The kpiId. + */ + public monitoring.Monitoring.KpiId getKpiId() { + if (kpiIdBuilder_ == null) { + return kpiId_ == null ? 
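// ----------------------------------------------------------------------------------
// Illustrative sketch (commentary, not part of the generated diff): the mergeFrom()
// logic above merges the singular kpi_id field recursively and concatenates the
// repeated raw_kpis field, so merging two lists sums their sample counts.
static monitoring.Monitoring.RawKpiList merge(monitoring.Monitoring.RawKpiList a,
                                              monitoring.Monitoring.RawKpiList b) {
  monitoring.Monitoring.RawKpiList merged = a.toBuilder().mergeFrom(b).build();
  // merged.getRawKpisCount() == a.getRawKpisCount() + b.getRawKpisCount()
  return merged;
}
// ----------------------------------------------------------------------------------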
+            monitoring.Monitoring.KpiId.getDefaultInstance() : kpiId_;
+        } else {
+          return kpiIdBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>.monitoring.KpiId kpi_id = 1;</code>
+       */
+      public Builder setKpiId(monitoring.Monitoring.KpiId value) {
+        if (kpiIdBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          kpiId_ = value;
+          onChanged();
+        } else {
+          kpiIdBuilder_.setMessage(value);
+        }
+
+        return this;
+      }
+      /**
+       * <code>.monitoring.KpiId kpi_id = 1;</code>
+       */
+      public Builder setKpiId(
+          monitoring.Monitoring.KpiId.Builder builderForValue) {
+        if (kpiIdBuilder_ == null) {
+          kpiId_ = builderForValue.build();
+          onChanged();
+        } else {
+          kpiIdBuilder_.setMessage(builderForValue.build());
+        }
+
+        return this;
+      }
+      /**
+       * <code>.monitoring.KpiId kpi_id = 1;</code>
+       */
+      public Builder mergeKpiId(monitoring.Monitoring.KpiId value) {
+        if (kpiIdBuilder_ == null) {
+          if (kpiId_ != null) {
+            kpiId_ =
+              monitoring.Monitoring.KpiId.newBuilder(kpiId_).mergeFrom(value).buildPartial();
+          } else {
+            kpiId_ = value;
+          }
+          onChanged();
+        } else {
+          kpiIdBuilder_.mergeFrom(value);
+        }
+
+        return this;
+      }
+      /**
+       * <code>.monitoring.KpiId kpi_id = 1;</code>
+       */
+      public Builder clearKpiId() {
+        if (kpiIdBuilder_ == null) {
+          kpiId_ = null;
+          onChanged();
+        } else {
+          kpiId_ = null;
+          kpiIdBuilder_ = null;
+        }
+
+        return this;
+      }
+      /**
+       * <code>.monitoring.KpiId kpi_id = 1;</code>
+       */
+      public monitoring.Monitoring.KpiId.Builder getKpiIdBuilder() {
+
+        onChanged();
+        return getKpiIdFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>.monitoring.KpiId kpi_id = 1;</code>
+       */
+      public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder() {
+        if (kpiIdBuilder_ != null) {
+          return kpiIdBuilder_.getMessageOrBuilder();
+        } else {
+          return kpiId_ == null ?
+ monitoring.Monitoring.KpiId.getDefaultInstance() : kpiId_; + } + } + /** + * <code>.monitoring.KpiId kpi_id = 1;</code> + */ + private com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> + getKpiIdFieldBuilder() { + if (kpiIdBuilder_ == null) { + kpiIdBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder>( + getKpiId(), + getParentForChildren(), + isClean()); + kpiId_ = null; + } + return kpiIdBuilder_; + } + + private java.util.List<monitoring.Monitoring.RawKpi> rawKpis_ = + java.util.Collections.emptyList(); + private void ensureRawKpisIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + rawKpis_ = new java.util.ArrayList<monitoring.Monitoring.RawKpi>(rawKpis_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.RawKpi, monitoring.Monitoring.RawKpi.Builder, monitoring.Monitoring.RawKpiOrBuilder> rawKpisBuilder_; + + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public java.util.List<monitoring.Monitoring.RawKpi> getRawKpisList() { + if (rawKpisBuilder_ == null) { + return java.util.Collections.unmodifiableList(rawKpis_); + } else { + return rawKpisBuilder_.getMessageList(); + } + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public int getRawKpisCount() { + if (rawKpisBuilder_ == null) { + return rawKpis_.size(); + } else { + return rawKpisBuilder_.getCount(); + } + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public monitoring.Monitoring.RawKpi getRawKpis(int index) { + if (rawKpisBuilder_ == null) { + return rawKpis_.get(index); + } else { + return rawKpisBuilder_.getMessage(index); + } + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder setRawKpis( + int index, monitoring.Monitoring.RawKpi value) { + if (rawKpisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRawKpisIsMutable(); + rawKpis_.set(index, value); + onChanged(); + } else { + rawKpisBuilder_.setMessage(index, value); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder setRawKpis( + int index, monitoring.Monitoring.RawKpi.Builder builderForValue) { + if (rawKpisBuilder_ == null) { + ensureRawKpisIsMutable(); + rawKpis_.set(index, builderForValue.build()); + onChanged(); + } else { + rawKpisBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder addRawKpis(monitoring.Monitoring.RawKpi value) { + if (rawKpisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRawKpisIsMutable(); + rawKpis_.add(value); + onChanged(); + } else { + rawKpisBuilder_.addMessage(value); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder addRawKpis( + int index, monitoring.Monitoring.RawKpi value) { + if (rawKpisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRawKpisIsMutable(); + rawKpis_.add(index, value); + onChanged(); + } else { + rawKpisBuilder_.addMessage(index, value); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder addRawKpis( + 
monitoring.Monitoring.RawKpi.Builder builderForValue) { + if (rawKpisBuilder_ == null) { + ensureRawKpisIsMutable(); + rawKpis_.add(builderForValue.build()); + onChanged(); + } else { + rawKpisBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder addRawKpis( + int index, monitoring.Monitoring.RawKpi.Builder builderForValue) { + if (rawKpisBuilder_ == null) { + ensureRawKpisIsMutable(); + rawKpis_.add(index, builderForValue.build()); + onChanged(); + } else { + rawKpisBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder addAllRawKpis( + java.lang.Iterable<? extends monitoring.Monitoring.RawKpi> values) { + if (rawKpisBuilder_ == null) { + ensureRawKpisIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, rawKpis_); + onChanged(); + } else { + rawKpisBuilder_.addAllMessages(values); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder clearRawKpis() { + if (rawKpisBuilder_ == null) { + rawKpis_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + rawKpisBuilder_.clear(); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public Builder removeRawKpis(int index) { + if (rawKpisBuilder_ == null) { + ensureRawKpisIsMutable(); + rawKpis_.remove(index); + onChanged(); + } else { + rawKpisBuilder_.remove(index); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public monitoring.Monitoring.RawKpi.Builder getRawKpisBuilder( + int index) { + return getRawKpisFieldBuilder().getBuilder(index); + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public monitoring.Monitoring.RawKpiOrBuilder getRawKpisOrBuilder( + int index) { + if (rawKpisBuilder_ == null) { + return rawKpis_.get(index); } else { + return rawKpisBuilder_.getMessageOrBuilder(index); + } + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public java.util.List<? 
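// ----------------------------------------------------------------------------------
// Illustrative sketch (commentary, not part of the generated diff): assembling a
// RawKpiList with the builder methods above. RawKpi's own fields are defined earlier
// in this file and are not repeated here, so the sketch only adds default instances.
static monitoring.Monitoring.RawKpiList buildColumn(monitoring.Monitoring.KpiId id,
                                                    int samples) {
  monitoring.Monitoring.RawKpiList.Builder b =
      monitoring.Monitoring.RawKpiList.newBuilder().setKpiId(id);
  for (int i = 0; i < samples; i++) {
    b.addRawKpis(monitoring.Monitoring.RawKpi.getDefaultInstance());
  }
  return b.build();
}
// ----------------------------------------------------------------------------------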
extends monitoring.Monitoring.RawKpiOrBuilder> + getRawKpisOrBuilderList() { + if (rawKpisBuilder_ != null) { + return rawKpisBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rawKpis_); + } + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public monitoring.Monitoring.RawKpi.Builder addRawKpisBuilder() { + return getRawKpisFieldBuilder().addBuilder( + monitoring.Monitoring.RawKpi.getDefaultInstance()); + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public monitoring.Monitoring.RawKpi.Builder addRawKpisBuilder( + int index) { + return getRawKpisFieldBuilder().addBuilder( + index, monitoring.Monitoring.RawKpi.getDefaultInstance()); + } + /** + * <code>repeated .monitoring.RawKpi raw_kpis = 2;</code> + */ + public java.util.List<monitoring.Monitoring.RawKpi.Builder> + getRawKpisBuilderList() { + return getRawKpisFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.RawKpi, monitoring.Monitoring.RawKpi.Builder, monitoring.Monitoring.RawKpiOrBuilder> + getRawKpisFieldBuilder() { + if (rawKpisBuilder_ == null) { + rawKpisBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.RawKpi, monitoring.Monitoring.RawKpi.Builder, monitoring.Monitoring.RawKpiOrBuilder>( + rawKpis_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + rawKpis_ = null; + } + return rawKpisBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:monitoring.RawKpiList) + } + + // @@protoc_insertion_point(class_scope:monitoring.RawKpiList) + private static final monitoring.Monitoring.RawKpiList DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new monitoring.Monitoring.RawKpiList(); + } + + public static monitoring.Monitoring.RawKpiList getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser<RawKpiList> + PARSER = new com.google.protobuf.AbstractParser<RawKpiList>() { + @java.lang.Override + public RawKpiList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RawKpiList(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser<RawKpiList> parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser<RawKpiList> getParserForType() { + return PARSER; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiList getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface RawKpiTableOrBuilder extends + // @@protoc_insertion_point(interface_extends:monitoring.RawKpiTable) + com.google.protobuf.MessageOrBuilder { + + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + java.util.List<monitoring.Monitoring.RawKpiList> + getRawKpiListsList(); + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + monitoring.Monitoring.RawKpiList getRawKpiLists(int index); + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ 
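// ----------------------------------------------------------------------------------
// Illustrative sketch (commentary, not part of the generated diff): per the proto
// comments ("column" / "table"), RawKpiTable nests one RawKpiList per KPI, each of
// which nests the raw samples; addAllRawKpiLists() appears in the builder below.
static monitoring.Monitoring.RawKpiTable buildTable(
    java.util.List<monitoring.Monitoring.RawKpiList> columns) {
  return monitoring.Monitoring.RawKpiTable.newBuilder()
      .addAllRawKpiLists(columns)
      .build();
}
// ----------------------------------------------------------------------------------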
+ int getRawKpiListsCount(); + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + java.util.List<? extends monitoring.Monitoring.RawKpiListOrBuilder> + getRawKpiListsOrBuilderList(); + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + monitoring.Monitoring.RawKpiListOrBuilder getRawKpiListsOrBuilder( + int index); + } + /** + * <pre> + * table + * </pre> + * + * Protobuf type {@code monitoring.RawKpiTable} + */ + public static final class RawKpiTable extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:monitoring.RawKpiTable) + RawKpiTableOrBuilder { + private static final long serialVersionUID = 0L; + // Use RawKpiTable.newBuilder() to construct. + private RawKpiTable(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { + super(builder); + } + private RawKpiTable() { + rawKpiLists_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RawKpiTable(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RawKpiTable( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + rawKpiLists_ = new java.util.ArrayList<monitoring.Monitoring.RawKpiList>(); + mutable_bitField0_ |= 0x00000001; + } + rawKpiLists_.add( + input.readMessage(monitoring.Monitoring.RawKpiList.parser(), extensionRegistry)); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + rawKpiLists_ = java.util.Collections.unmodifiableList(rawKpiLists_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiTable_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiTable_fieldAccessorTable + .ensureFieldAccessorsInitialized( + monitoring.Monitoring.RawKpiTable.class, monitoring.Monitoring.RawKpiTable.Builder.class); + } + + public static final int RAW_KPI_LISTS_FIELD_NUMBER = 1; + private java.util.List<monitoring.Monitoring.RawKpiList> rawKpiLists_; + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + @java.lang.Override + public java.util.List<monitoring.Monitoring.RawKpiList> getRawKpiListsList() { + return 
rawKpiLists_; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + @java.lang.Override + public java.util.List<? extends monitoring.Monitoring.RawKpiListOrBuilder> + getRawKpiListsOrBuilderList() { + return rawKpiLists_; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + @java.lang.Override + public int getRawKpiListsCount() { + return rawKpiLists_.size(); + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + @java.lang.Override + public monitoring.Monitoring.RawKpiList getRawKpiLists(int index) { + return rawKpiLists_.get(index); + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + @java.lang.Override + public monitoring.Monitoring.RawKpiListOrBuilder getRawKpiListsOrBuilder( + int index) { + return rawKpiLists_.get(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < rawKpiLists_.size(); i++) { + output.writeMessage(1, rawKpiLists_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < rawKpiLists_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, rawKpiLists_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof monitoring.Monitoring.RawKpiTable)) { + return super.equals(obj); + } + monitoring.Monitoring.RawKpiTable other = (monitoring.Monitoring.RawKpiTable) obj; + + if (!getRawKpiListsList() + .equals(other.getRawKpiListsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getRawKpiListsCount() > 0) { + hash = (37 * hash) + RAW_KPI_LISTS_FIELD_NUMBER; + hash = (53 * hash) + getRawKpiListsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static monitoring.Monitoring.RawKpiTable parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
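// ----------------------------------------------------------------------------------
// Illustrative sketch (commentary, not part of the generated diff): walking a parsed
// RawKpiTable with the read-only accessors above.
static int countSamples(monitoring.Monitoring.RawKpiTable table) {
  int total = 0;
  for (monitoring.Monitoring.RawKpiList column : table.getRawKpiListsList()) {
    total += column.getRawKpisCount();  // column.getKpiId() identifies the KPI
  }
  return total;
}
// ----------------------------------------------------------------------------------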
PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiTable parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiTable parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiTable parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpiTable parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static monitoring.Monitoring.RawKpiTable parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(monitoring.Monitoring.RawKpiTable prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * <pre> + * table + * </pre> + * + * Protobuf type {@code monitoring.RawKpiTable} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements + // @@protoc_insertion_point(builder_implements:monitoring.RawKpiTable) + monitoring.Monitoring.RawKpiTableOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiTable_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiTable_fieldAccessorTable + .ensureFieldAccessorsInitialized( + monitoring.Monitoring.RawKpiTable.class, monitoring.Monitoring.RawKpiTable.Builder.class); + } + + // Construct using monitoring.Monitoring.RawKpiTable.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRawKpiListsFieldBuilder(); + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + if (rawKpiListsBuilder_ == null) { + rawKpiLists_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + rawKpiListsBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return monitoring.Monitoring.internal_static_monitoring_RawKpiTable_descriptor; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiTable getDefaultInstanceForType() { + return monitoring.Monitoring.RawKpiTable.getDefaultInstance(); + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiTable build() { + monitoring.Monitoring.RawKpiTable result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiTable buildPartial() { + monitoring.Monitoring.RawKpiTable result = new monitoring.Monitoring.RawKpiTable(this); + int from_bitField0_ = bitField0_; + if (rawKpiListsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + rawKpiLists_ = java.util.Collections.unmodifiableList(rawKpiLists_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.rawKpiLists_ = rawKpiLists_; + } else { + result.rawKpiLists_ = rawKpiListsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( 
+ com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof monitoring.Monitoring.RawKpiTable) { + return mergeFrom((monitoring.Monitoring.RawKpiTable)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(monitoring.Monitoring.RawKpiTable other) { + if (other == monitoring.Monitoring.RawKpiTable.getDefaultInstance()) return this; + if (rawKpiListsBuilder_ == null) { + if (!other.rawKpiLists_.isEmpty()) { + if (rawKpiLists_.isEmpty()) { + rawKpiLists_ = other.rawKpiLists_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRawKpiListsIsMutable(); + rawKpiLists_.addAll(other.rawKpiLists_); + } + onChanged(); + } + } else { + if (!other.rawKpiLists_.isEmpty()) { + if (rawKpiListsBuilder_.isEmpty()) { + rawKpiListsBuilder_.dispose(); + rawKpiListsBuilder_ = null; + rawKpiLists_ = other.rawKpiLists_; + bitField0_ = (bitField0_ & ~0x00000001); + rawKpiListsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getRawKpiListsFieldBuilder() : null; + } else { + rawKpiListsBuilder_.addAllMessages(other.rawKpiLists_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + monitoring.Monitoring.RawKpiTable parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (monitoring.Monitoring.RawKpiTable) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List<monitoring.Monitoring.RawKpiList> rawKpiLists_ = + java.util.Collections.emptyList(); + private void ensureRawKpiListsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + rawKpiLists_ = new java.util.ArrayList<monitoring.Monitoring.RawKpiList>(rawKpiLists_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.RawKpiList, monitoring.Monitoring.RawKpiList.Builder, monitoring.Monitoring.RawKpiListOrBuilder> rawKpiListsBuilder_; + + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public java.util.List<monitoring.Monitoring.RawKpiList> getRawKpiListsList() { + if (rawKpiListsBuilder_ == null) { + return java.util.Collections.unmodifiableList(rawKpiLists_); + } else { + return rawKpiListsBuilder_.getMessageList(); + } + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public int getRawKpiListsCount() { + if (rawKpiListsBuilder_ == null) { + return rawKpiLists_.size(); + } else { + return rawKpiListsBuilder_.getCount(); + } + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public monitoring.Monitoring.RawKpiList getRawKpiLists(int index) { + 
if (rawKpiListsBuilder_ == null) { + return rawKpiLists_.get(index); + } else { + return rawKpiListsBuilder_.getMessage(index); + } + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder setRawKpiLists( + int index, monitoring.Monitoring.RawKpiList value) { + if (rawKpiListsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRawKpiListsIsMutable(); + rawKpiLists_.set(index, value); + onChanged(); + } else { + rawKpiListsBuilder_.setMessage(index, value); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder setRawKpiLists( + int index, monitoring.Monitoring.RawKpiList.Builder builderForValue) { + if (rawKpiListsBuilder_ == null) { + ensureRawKpiListsIsMutable(); + rawKpiLists_.set(index, builderForValue.build()); + onChanged(); + } else { + rawKpiListsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder addRawKpiLists(monitoring.Monitoring.RawKpiList value) { + if (rawKpiListsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRawKpiListsIsMutable(); + rawKpiLists_.add(value); + onChanged(); + } else { + rawKpiListsBuilder_.addMessage(value); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder addRawKpiLists( + int index, monitoring.Monitoring.RawKpiList value) { + if (rawKpiListsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRawKpiListsIsMutable(); + rawKpiLists_.add(index, value); + onChanged(); + } else { + rawKpiListsBuilder_.addMessage(index, value); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder addRawKpiLists( + monitoring.Monitoring.RawKpiList.Builder builderForValue) { + if (rawKpiListsBuilder_ == null) { + ensureRawKpiListsIsMutable(); + rawKpiLists_.add(builderForValue.build()); + onChanged(); + } else { + rawKpiListsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder addRawKpiLists( + int index, monitoring.Monitoring.RawKpiList.Builder builderForValue) { + if (rawKpiListsBuilder_ == null) { + ensureRawKpiListsIsMutable(); + rawKpiLists_.add(index, builderForValue.build()); + onChanged(); + } else { + rawKpiListsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder addAllRawKpiLists( + java.lang.Iterable<? 
extends monitoring.Monitoring.RawKpiList> values) { + if (rawKpiListsBuilder_ == null) { + ensureRawKpiListsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, rawKpiLists_); + onChanged(); + } else { + rawKpiListsBuilder_.addAllMessages(values); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder clearRawKpiLists() { + if (rawKpiListsBuilder_ == null) { + rawKpiLists_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + rawKpiListsBuilder_.clear(); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public Builder removeRawKpiLists(int index) { + if (rawKpiListsBuilder_ == null) { + ensureRawKpiListsIsMutable(); + rawKpiLists_.remove(index); + onChanged(); + } else { + rawKpiListsBuilder_.remove(index); + } + return this; + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public monitoring.Monitoring.RawKpiList.Builder getRawKpiListsBuilder( + int index) { + return getRawKpiListsFieldBuilder().getBuilder(index); + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public monitoring.Monitoring.RawKpiListOrBuilder getRawKpiListsOrBuilder( + int index) { + if (rawKpiListsBuilder_ == null) { + return rawKpiLists_.get(index); } else { + return rawKpiListsBuilder_.getMessageOrBuilder(index); + } + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public java.util.List<? extends monitoring.Monitoring.RawKpiListOrBuilder> + getRawKpiListsOrBuilderList() { + if (rawKpiListsBuilder_ != null) { + return rawKpiListsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rawKpiLists_); + } + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public monitoring.Monitoring.RawKpiList.Builder addRawKpiListsBuilder() { + return getRawKpiListsFieldBuilder().addBuilder( + monitoring.Monitoring.RawKpiList.getDefaultInstance()); + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public monitoring.Monitoring.RawKpiList.Builder addRawKpiListsBuilder( + int index) { + return getRawKpiListsFieldBuilder().addBuilder( + index, monitoring.Monitoring.RawKpiList.getDefaultInstance()); + } + /** + * <code>repeated .monitoring.RawKpiList raw_kpi_lists = 1;</code> + */ + public java.util.List<monitoring.Monitoring.RawKpiList.Builder> + getRawKpiListsBuilderList() { + return getRawKpiListsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.RawKpiList, monitoring.Monitoring.RawKpiList.Builder, monitoring.Monitoring.RawKpiListOrBuilder> + getRawKpiListsFieldBuilder() { + if (rawKpiListsBuilder_ == null) { + rawKpiListsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.RawKpiList, monitoring.Monitoring.RawKpiList.Builder, monitoring.Monitoring.RawKpiListOrBuilder>( + rawKpiLists_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + rawKpiLists_ = null; + } + return rawKpiListsBuilder_; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return 
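// ----------------------------------------------------------------------------------
// Illustrative sketch (commentary, not part of the generated diff): editing an
// existing table through toBuilder() with the index-based mutators above; iterating
// backwards keeps the pending indices stable while removing entries.
static monitoring.Monitoring.RawKpiTable dropEmptyColumns(
    monitoring.Monitoring.RawKpiTable table) {
  monitoring.Monitoring.RawKpiTable.Builder b = table.toBuilder();
  for (int i = b.getRawKpiListsCount() - 1; i >= 0; i--) {
    if (b.getRawKpiLists(i).getRawKpisCount() == 0) {
      b.removeRawKpiLists(i);
    }
  }
  return b.build();
}
// ----------------------------------------------------------------------------------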
super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:monitoring.RawKpiTable) + } + + // @@protoc_insertion_point(class_scope:monitoring.RawKpiTable) + private static final monitoring.Monitoring.RawKpiTable DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new monitoring.Monitoring.RawKpiTable(); + } + + public static monitoring.Monitoring.RawKpiTable getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser<RawKpiTable> + PARSER = new com.google.protobuf.AbstractParser<RawKpiTable>() { + @java.lang.Override + public RawKpiTable parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RawKpiTable(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser<RawKpiTable> parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser<RawKpiTable> getParserForType() { + return PARSER; + } + + @java.lang.Override + public monitoring.Monitoring.RawKpiTable getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface KpiIdOrBuilder extends + // @@protoc_insertion_point(interface_extends:monitoring.KpiId) + com.google.protobuf.MessageOrBuilder { + + /** + * <code>.context.Uuid kpi_id = 1;</code> + * @return Whether the kpiId field is set. + */ + boolean hasKpiId(); + /** + * <code>.context.Uuid kpi_id = 1;</code> + * @return The kpiId. + */ + context.ContextOuterClass.Uuid getKpiId(); + /** + * <code>.context.Uuid kpi_id = 1;</code> */ context.ContextOuterClass.UuidOrBuilder getKpiIdOrBuilder(); } @@ -8482,27 +11250,27 @@ public final class Monitoring { com.google.protobuf.MessageOrBuilder { /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ java.util.List<monitoring.Monitoring.Kpi> - getKpiListList(); + getKpiList(); /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - monitoring.Monitoring.Kpi getKpiList(int index); + monitoring.Monitoring.Kpi getKpi(int index); /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - int getKpiListCount(); + int getKpiCount(); /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ java.util.List<? 
extends monitoring.Monitoring.KpiOrBuilder> - getKpiListOrBuilderList(); + getKpiOrBuilderList(); /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - monitoring.Monitoring.KpiOrBuilder getKpiListOrBuilder( + monitoring.Monitoring.KpiOrBuilder getKpiOrBuilder( int index); } /** @@ -8518,7 +11286,7 @@ public final class Monitoring { super(builder); } private KpiList() { - kpiList_ = java.util.Collections.emptyList(); + kpi_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -8554,10 +11322,10 @@ public final class Monitoring { break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { - kpiList_ = new java.util.ArrayList<monitoring.Monitoring.Kpi>(); + kpi_ = new java.util.ArrayList<monitoring.Monitoring.Kpi>(); mutable_bitField0_ |= 0x00000001; } - kpiList_.add( + kpi_.add( input.readMessage(monitoring.Monitoring.Kpi.parser(), extensionRegistry)); break; } @@ -8577,7 +11345,7 @@ public final class Monitoring { e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { - kpiList_ = java.util.Collections.unmodifiableList(kpiList_); + kpi_ = java.util.Collections.unmodifiableList(kpi_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -8596,44 +11364,44 @@ public final class Monitoring { monitoring.Monitoring.KpiList.class, monitoring.Monitoring.KpiList.Builder.class); } - public static final int KPI_LIST_FIELD_NUMBER = 1; - private java.util.List<monitoring.Monitoring.Kpi> kpiList_; + public static final int KPI_FIELD_NUMBER = 1; + private java.util.List<monitoring.Monitoring.Kpi> kpi_; /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ @java.lang.Override - public java.util.List<monitoring.Monitoring.Kpi> getKpiListList() { - return kpiList_; + public java.util.List<monitoring.Monitoring.Kpi> getKpiList() { + return kpi_; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ @java.lang.Override public java.util.List<? 
extends monitoring.Monitoring.KpiOrBuilder> - getKpiListOrBuilderList() { - return kpiList_; + getKpiOrBuilderList() { + return kpi_; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ @java.lang.Override - public int getKpiListCount() { - return kpiList_.size(); + public int getKpiCount() { + return kpi_.size(); } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.Kpi getKpiList(int index) { - return kpiList_.get(index); + public monitoring.Monitoring.Kpi getKpi(int index) { + return kpi_.get(index); } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.KpiOrBuilder getKpiListOrBuilder( + public monitoring.Monitoring.KpiOrBuilder getKpiOrBuilder( int index) { - return kpiList_.get(index); + return kpi_.get(index); } private byte memoizedIsInitialized = -1; @@ -8650,8 +11418,8 @@ public final class Monitoring { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - for (int i = 0; i < kpiList_.size(); i++) { - output.writeMessage(1, kpiList_.get(i)); + for (int i = 0; i < kpi_.size(); i++) { + output.writeMessage(1, kpi_.get(i)); } unknownFields.writeTo(output); } @@ -8662,9 +11430,9 @@ public final class Monitoring { if (size != -1) return size; size = 0; - for (int i = 0; i < kpiList_.size(); i++) { + for (int i = 0; i < kpi_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, kpiList_.get(i)); + .computeMessageSize(1, kpi_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -8681,8 +11449,8 @@ public final class Monitoring { } monitoring.Monitoring.KpiList other = (monitoring.Monitoring.KpiList) obj; - if (!getKpiListList() - .equals(other.getKpiListList())) return false; + if (!getKpiList() + .equals(other.getKpiList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -8694,9 +11462,9 @@ public final class Monitoring { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (getKpiListCount() > 0) { - hash = (37 * hash) + KPI_LIST_FIELD_NUMBER; - hash = (53 * hash) + getKpiListList().hashCode(); + if (getKpiCount() > 0) { + hash = (37 * hash) + KPI_FIELD_NUMBER; + hash = (53 * hash) + getKpiList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -8826,17 +11594,17 @@ public final class Monitoring { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getKpiListFieldBuilder(); + getKpiFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); - if (kpiListBuilder_ == null) { - kpiList_ = java.util.Collections.emptyList(); + if (kpiBuilder_ == null) { + kpi_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - kpiListBuilder_.clear(); + kpiBuilder_.clear(); } return this; } @@ -8865,14 +11633,14 @@ public final class Monitoring { public monitoring.Monitoring.KpiList buildPartial() { monitoring.Monitoring.KpiList result = new monitoring.Monitoring.KpiList(this); int from_bitField0_ = bitField0_; - if (kpiListBuilder_ == null) { + if (kpiBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { - kpiList_ = java.util.Collections.unmodifiableList(kpiList_); + 
kpi_ = java.util.Collections.unmodifiableList(kpi_); bitField0_ = (bitField0_ & ~0x00000001); } - result.kpiList_ = kpiList_; + result.kpi_ = kpi_; } else { - result.kpiList_ = kpiListBuilder_.build(); + result.kpi_ = kpiBuilder_.build(); } onBuilt(); return result; @@ -8922,29 +11690,29 @@ public final class Monitoring { public Builder mergeFrom(monitoring.Monitoring.KpiList other) { if (other == monitoring.Monitoring.KpiList.getDefaultInstance()) return this; - if (kpiListBuilder_ == null) { - if (!other.kpiList_.isEmpty()) { - if (kpiList_.isEmpty()) { - kpiList_ = other.kpiList_; + if (kpiBuilder_ == null) { + if (!other.kpi_.isEmpty()) { + if (kpi_.isEmpty()) { + kpi_ = other.kpi_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureKpiListIsMutable(); - kpiList_.addAll(other.kpiList_); + ensureKpiIsMutable(); + kpi_.addAll(other.kpi_); } onChanged(); } } else { - if (!other.kpiList_.isEmpty()) { - if (kpiListBuilder_.isEmpty()) { - kpiListBuilder_.dispose(); - kpiListBuilder_ = null; - kpiList_ = other.kpiList_; + if (!other.kpi_.isEmpty()) { + if (kpiBuilder_.isEmpty()) { + kpiBuilder_.dispose(); + kpiBuilder_ = null; + kpi_ = other.kpi_; bitField0_ = (bitField0_ & ~0x00000001); - kpiListBuilder_ = + kpiBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getKpiListFieldBuilder() : null; + getKpiFieldBuilder() : null; } else { - kpiListBuilder_.addAllMessages(other.kpiList_); + kpiBuilder_.addAllMessages(other.kpi_); } } } @@ -8978,244 +11746,244 @@ public final class Monitoring { } private int bitField0_; - private java.util.List<monitoring.Monitoring.Kpi> kpiList_ = + private java.util.List<monitoring.Monitoring.Kpi> kpi_ = java.util.Collections.emptyList(); - private void ensureKpiListIsMutable() { + private void ensureKpiIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { - kpiList_ = new java.util.ArrayList<monitoring.Monitoring.Kpi>(kpiList_); + kpi_ = new java.util.ArrayList<monitoring.Monitoring.Kpi>(kpi_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilderV3< - monitoring.Monitoring.Kpi, monitoring.Monitoring.Kpi.Builder, monitoring.Monitoring.KpiOrBuilder> kpiListBuilder_; + monitoring.Monitoring.Kpi, monitoring.Monitoring.Kpi.Builder, monitoring.Monitoring.KpiOrBuilder> kpiBuilder_; /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public java.util.List<monitoring.Monitoring.Kpi> getKpiListList() { - if (kpiListBuilder_ == null) { - return java.util.Collections.unmodifiableList(kpiList_); + public java.util.List<monitoring.Monitoring.Kpi> getKpiList() { + if (kpiBuilder_ == null) { + return java.util.Collections.unmodifiableList(kpi_); } else { - return kpiListBuilder_.getMessageList(); + return kpiBuilder_.getMessageList(); } } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public int getKpiListCount() { - if (kpiListBuilder_ == null) { - return kpiList_.size(); + public int getKpiCount() { + if (kpiBuilder_ == null) { + return kpi_.size(); } else { - return kpiListBuilder_.getCount(); + return kpiBuilder_.getCount(); } } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public monitoring.Monitoring.Kpi getKpiList(int index) { - if (kpiListBuilder_ == null) { - return kpiList_.get(index); + public monitoring.Monitoring.Kpi getKpi(int index) { + if (kpiBuilder_ == null) { + return kpi_.get(index); } else { - return 
kpiListBuilder_.getMessage(index); + return kpiBuilder_.getMessage(index); } } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder setKpiList( + public Builder setKpi( int index, monitoring.Monitoring.Kpi value) { - if (kpiListBuilder_ == null) { + if (kpiBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiListIsMutable(); - kpiList_.set(index, value); + ensureKpiIsMutable(); + kpi_.set(index, value); onChanged(); } else { - kpiListBuilder_.setMessage(index, value); + kpiBuilder_.setMessage(index, value); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder setKpiList( + public Builder setKpi( int index, monitoring.Monitoring.Kpi.Builder builderForValue) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.set(index, builderForValue.build()); + if (kpiBuilder_ == null) { + ensureKpiIsMutable(); + kpi_.set(index, builderForValue.build()); onChanged(); } else { - kpiListBuilder_.setMessage(index, builderForValue.build()); + kpiBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder addKpiList(monitoring.Monitoring.Kpi value) { - if (kpiListBuilder_ == null) { + public Builder addKpi(monitoring.Monitoring.Kpi value) { + if (kpiBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiListIsMutable(); - kpiList_.add(value); + ensureKpiIsMutable(); + kpi_.add(value); onChanged(); } else { - kpiListBuilder_.addMessage(value); + kpiBuilder_.addMessage(value); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder addKpiList( + public Builder addKpi( int index, monitoring.Monitoring.Kpi value) { - if (kpiListBuilder_ == null) { + if (kpiBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiListIsMutable(); - kpiList_.add(index, value); + ensureKpiIsMutable(); + kpi_.add(index, value); onChanged(); } else { - kpiListBuilder_.addMessage(index, value); + kpiBuilder_.addMessage(index, value); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder addKpiList( + public Builder addKpi( monitoring.Monitoring.Kpi.Builder builderForValue) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.add(builderForValue.build()); + if (kpiBuilder_ == null) { + ensureKpiIsMutable(); + kpi_.add(builderForValue.build()); onChanged(); } else { - kpiListBuilder_.addMessage(builderForValue.build()); + kpiBuilder_.addMessage(builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder addKpiList( + public Builder addKpi( int index, monitoring.Monitoring.Kpi.Builder builderForValue) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.add(index, builderForValue.build()); + if (kpiBuilder_ == null) { + ensureKpiIsMutable(); + kpi_.add(index, builderForValue.build()); onChanged(); } else { - kpiListBuilder_.addMessage(index, builderForValue.build()); + kpiBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * <code>repeated 
.monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder addAllKpiList( + public Builder addAllKpi( java.lang.Iterable<? extends monitoring.Monitoring.Kpi> values) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); + if (kpiBuilder_ == null) { + ensureKpiIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, kpiList_); + values, kpi_); onChanged(); } else { - kpiListBuilder_.addAllMessages(values); + kpiBuilder_.addAllMessages(values); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder clearKpiList() { - if (kpiListBuilder_ == null) { - kpiList_ = java.util.Collections.emptyList(); + public Builder clearKpi() { + if (kpiBuilder_ == null) { + kpi_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - kpiListBuilder_.clear(); + kpiBuilder_.clear(); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public Builder removeKpiList(int index) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.remove(index); + public Builder removeKpi(int index) { + if (kpiBuilder_ == null) { + ensureKpiIsMutable(); + kpi_.remove(index); onChanged(); } else { - kpiListBuilder_.remove(index); + kpiBuilder_.remove(index); } return this; } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public monitoring.Monitoring.Kpi.Builder getKpiListBuilder( + public monitoring.Monitoring.Kpi.Builder getKpiBuilder( int index) { - return getKpiListFieldBuilder().getBuilder(index); + return getKpiFieldBuilder().getBuilder(index); } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public monitoring.Monitoring.KpiOrBuilder getKpiListOrBuilder( + public monitoring.Monitoring.KpiOrBuilder getKpiOrBuilder( int index) { - if (kpiListBuilder_ == null) { - return kpiList_.get(index); } else { - return kpiListBuilder_.getMessageOrBuilder(index); + if (kpiBuilder_ == null) { + return kpi_.get(index); } else { + return kpiBuilder_.getMessageOrBuilder(index); } } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ public java.util.List<? 
extends monitoring.Monitoring.KpiOrBuilder> - getKpiListOrBuilderList() { - if (kpiListBuilder_ != null) { - return kpiListBuilder_.getMessageOrBuilderList(); + getKpiOrBuilderList() { + if (kpiBuilder_ != null) { + return kpiBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(kpiList_); + return java.util.Collections.unmodifiableList(kpi_); } } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public monitoring.Monitoring.Kpi.Builder addKpiListBuilder() { - return getKpiListFieldBuilder().addBuilder( + public monitoring.Monitoring.Kpi.Builder addKpiBuilder() { + return getKpiFieldBuilder().addBuilder( monitoring.Monitoring.Kpi.getDefaultInstance()); } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ - public monitoring.Monitoring.Kpi.Builder addKpiListBuilder( + public monitoring.Monitoring.Kpi.Builder addKpiBuilder( int index) { - return getKpiListFieldBuilder().addBuilder( + return getKpiFieldBuilder().addBuilder( index, monitoring.Monitoring.Kpi.getDefaultInstance()); } /** - * <code>repeated .monitoring.Kpi kpi_list = 1;</code> + * <code>repeated .monitoring.Kpi kpi = 1;</code> */ public java.util.List<monitoring.Monitoring.Kpi.Builder> - getKpiListBuilderList() { - return getKpiListFieldBuilder().getBuilderList(); + getKpiBuilderList() { + return getKpiFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< monitoring.Monitoring.Kpi, monitoring.Monitoring.Kpi.Builder, monitoring.Monitoring.KpiOrBuilder> - getKpiListFieldBuilder() { - if (kpiListBuilder_ == null) { - kpiListBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + getKpiFieldBuilder() { + if (kpiBuilder_ == null) { + kpiBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< monitoring.Monitoring.Kpi, monitoring.Monitoring.Kpi.Builder, monitoring.Monitoring.KpiOrBuilder>( - kpiList_, + kpi_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); - kpiList_ = null; + kpi_ = null; } - return kpiListBuilder_; + return kpiBuilder_; } @java.lang.Override public final Builder setUnknownFields( @@ -12198,28 +14966,19 @@ public final class Monitoring { monitoring.Monitoring.SubscriptionIDOrBuilder getSubsIdOrBuilder(); /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - java.util.List<monitoring.Monitoring.KpiList> - getKpiListList(); - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - monitoring.Monitoring.KpiList getKpiList(int index); - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> + * @return Whether the kpiList field is set. */ - int getKpiListCount(); + boolean hasKpiList(); /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> + * @return The kpiList. */ - java.util.List<? 
extends monitoring.Monitoring.KpiListOrBuilder> - getKpiListOrBuilderList(); + monitoring.Monitoring.KpiList getKpiList(); /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ - monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder( - int index); + monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder(); } /** * Protobuf type {@code monitoring.SubsResponse} @@ -12234,7 +14993,6 @@ public final class Monitoring { super(builder); } private SubsResponse() { - kpiList_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -12257,7 +15015,6 @@ public final class Monitoring { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -12282,12 +15039,16 @@ public final class Monitoring { break; } case 18: { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - kpiList_ = new java.util.ArrayList<monitoring.Monitoring.KpiList>(); - mutable_bitField0_ |= 0x00000001; + monitoring.Monitoring.KpiList.Builder subBuilder = null; + if (kpiList_ != null) { + subBuilder = kpiList_.toBuilder(); + } + kpiList_ = input.readMessage(monitoring.Monitoring.KpiList.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(kpiList_); + kpiList_ = subBuilder.buildPartial(); } - kpiList_.add( - input.readMessage(monitoring.Monitoring.KpiList.parser(), extensionRegistry)); + break; } default: { @@ -12305,9 +15066,6 @@ public final class Monitoring { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { - kpiList_ = java.util.Collections.unmodifiableList(kpiList_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -12352,43 +15110,29 @@ public final class Monitoring { } public static final int KPI_LIST_FIELD_NUMBER = 2; - private java.util.List<monitoring.Monitoring.KpiList> kpiList_; - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - @java.lang.Override - public java.util.List<monitoring.Monitoring.KpiList> getKpiListList() { - return kpiList_; - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - @java.lang.Override - public java.util.List<? extends monitoring.Monitoring.KpiListOrBuilder> - getKpiListOrBuilderList() { - return kpiList_; - } + private monitoring.Monitoring.KpiList kpiList_; /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> + * @return Whether the kpiList field is set. */ @java.lang.Override - public int getKpiListCount() { - return kpiList_.size(); + public boolean hasKpiList() { + return kpiList_ != null; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> + * @return The kpiList. */ @java.lang.Override - public monitoring.Monitoring.KpiList getKpiList(int index) { - return kpiList_.get(index); + public monitoring.Monitoring.KpiList getKpiList() { + return kpiList_ == null ? 
monitoring.Monitoring.KpiList.getDefaultInstance() : kpiList_; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ @java.lang.Override - public monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder( - int index) { - return kpiList_.get(index); + public monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder() { + return getKpiList(); } private byte memoizedIsInitialized = -1; @@ -12408,8 +15152,8 @@ public final class Monitoring { if (subsId_ != null) { output.writeMessage(1, getSubsId()); } - for (int i = 0; i < kpiList_.size(); i++) { - output.writeMessage(2, kpiList_.get(i)); + if (kpiList_ != null) { + output.writeMessage(2, getKpiList()); } unknownFields.writeTo(output); } @@ -12424,9 +15168,9 @@ public final class Monitoring { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, getSubsId()); } - for (int i = 0; i < kpiList_.size(); i++) { + if (kpiList_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, kpiList_.get(i)); + .computeMessageSize(2, getKpiList()); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -12448,8 +15192,11 @@ public final class Monitoring { if (!getSubsId() .equals(other.getSubsId())) return false; } - if (!getKpiListList() - .equals(other.getKpiListList())) return false; + if (hasKpiList() != other.hasKpiList()) return false; + if (hasKpiList()) { + if (!getKpiList() + .equals(other.getKpiList())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -12465,9 +15212,9 @@ public final class Monitoring { hash = (37 * hash) + SUBS_ID_FIELD_NUMBER; hash = (53 * hash) + getSubsId().hashCode(); } - if (getKpiListCount() > 0) { + if (hasKpiList()) { hash = (37 * hash) + KPI_LIST_FIELD_NUMBER; - hash = (53 * hash) + getKpiListList().hashCode(); + hash = (53 * hash) + getKpiList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -12597,7 +15344,6 @@ public final class Monitoring { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getKpiListFieldBuilder(); } } @java.lang.Override @@ -12610,10 +15356,10 @@ public final class Monitoring { subsIdBuilder_ = null; } if (kpiListBuilder_ == null) { - kpiList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + kpiList_ = null; } else { - kpiListBuilder_.clear(); + kpiList_ = null; + kpiListBuilder_ = null; } return this; } @@ -12641,17 +15387,12 @@ public final class Monitoring { @java.lang.Override public monitoring.Monitoring.SubsResponse buildPartial() { monitoring.Monitoring.SubsResponse result = new monitoring.Monitoring.SubsResponse(this); - int from_bitField0_ = bitField0_; if (subsIdBuilder_ == null) { result.subsId_ = subsId_; } else { result.subsId_ = subsIdBuilder_.build(); } if (kpiListBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0)) { - kpiList_ = java.util.Collections.unmodifiableList(kpiList_); - bitField0_ = (bitField0_ & ~0x00000001); - } result.kpiList_ = kpiList_; } else { result.kpiList_ = kpiListBuilder_.build(); @@ -12707,31 +15448,8 @@ public final class Monitoring { if (other.hasSubsId()) { mergeSubsId(other.getSubsId()); } - if (kpiListBuilder_ == null) { - if (!other.kpiList_.isEmpty()) { - if (kpiList_.isEmpty()) { - kpiList_ = other.kpiList_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureKpiListIsMutable(); - kpiList_.addAll(other.kpiList_); - } - 
onChanged(); - } - } else { - if (!other.kpiList_.isEmpty()) { - if (kpiListBuilder_.isEmpty()) { - kpiListBuilder_.dispose(); - kpiListBuilder_ = null; - kpiList_ = other.kpiList_; - bitField0_ = (bitField0_ & ~0x00000001); - kpiListBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getKpiListFieldBuilder() : null; - } else { - kpiListBuilder_.addAllMessages(other.kpiList_); - } - } + if (other.hasKpiList()) { + mergeKpiList(other.getKpiList()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -12761,7 +15479,6 @@ public final class Monitoring { } return this; } - private int bitField0_; private monitoring.Monitoring.SubscriptionID subsId_; private com.google.protobuf.SingleFieldBuilderV3< @@ -12882,239 +15599,118 @@ public final class Monitoring { return subsIdBuilder_; } - private java.util.List<monitoring.Monitoring.KpiList> kpiList_ = - java.util.Collections.emptyList(); - private void ensureKpiListIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - kpiList_ = new java.util.ArrayList<monitoring.Monitoring.KpiList>(kpiList_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< + private monitoring.Monitoring.KpiList kpiList_; + private com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiList, monitoring.Monitoring.KpiList.Builder, monitoring.Monitoring.KpiListOrBuilder> kpiListBuilder_; - - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public java.util.List<monitoring.Monitoring.KpiList> getKpiListList() { - if (kpiListBuilder_ == null) { - return java.util.Collections.unmodifiableList(kpiList_); - } else { - return kpiListBuilder_.getMessageList(); - } - } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> + * @return Whether the kpiList field is set. */ - public int getKpiListCount() { - if (kpiListBuilder_ == null) { - return kpiList_.size(); - } else { - return kpiListBuilder_.getCount(); - } + public boolean hasKpiList() { + return kpiListBuilder_ != null || kpiList_ != null; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> + * @return The kpiList. */ - public monitoring.Monitoring.KpiList getKpiList(int index) { + public monitoring.Monitoring.KpiList getKpiList() { if (kpiListBuilder_ == null) { - return kpiList_.get(index); + return kpiList_ == null ? 
monitoring.Monitoring.KpiList.getDefaultInstance() : kpiList_; } else { - return kpiListBuilder_.getMessage(index); + return kpiListBuilder_.getMessage(); } } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ - public Builder setKpiList( - int index, monitoring.Monitoring.KpiList value) { + public Builder setKpiList(monitoring.Monitoring.KpiList value) { if (kpiListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiListIsMutable(); - kpiList_.set(index, value); + kpiList_ = value; onChanged(); } else { - kpiListBuilder_.setMessage(index, value); + kpiListBuilder_.setMessage(value); } + return this; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ public Builder setKpiList( - int index, monitoring.Monitoring.KpiList.Builder builderForValue) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.set(index, builderForValue.build()); - onChanged(); - } else { - kpiListBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public Builder addKpiList(monitoring.Monitoring.KpiList value) { - if (kpiListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiListIsMutable(); - kpiList_.add(value); - onChanged(); - } else { - kpiListBuilder_.addMessage(value); - } - return this; - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public Builder addKpiList( - int index, monitoring.Monitoring.KpiList value) { - if (kpiListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiListIsMutable(); - kpiList_.add(index, value); - onChanged(); - } else { - kpiListBuilder_.addMessage(index, value); - } - return this; - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public Builder addKpiList( monitoring.Monitoring.KpiList.Builder builderForValue) { if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.add(builderForValue.build()); - onChanged(); - } else { - kpiListBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public Builder addKpiList( - int index, monitoring.Monitoring.KpiList.Builder builderForValue) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.add(index, builderForValue.build()); + kpiList_ = builderForValue.build(); onChanged(); } else { - kpiListBuilder_.addMessage(index, builderForValue.build()); + kpiListBuilder_.setMessage(builderForValue.build()); } + return this; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ - public Builder addAllKpiList( - java.lang.Iterable<? 
extends monitoring.Monitoring.KpiList> values) { + public Builder mergeKpiList(monitoring.Monitoring.KpiList value) { if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, kpiList_); + if (kpiList_ != null) { + kpiList_ = + monitoring.Monitoring.KpiList.newBuilder(kpiList_).mergeFrom(value).buildPartial(); + } else { + kpiList_ = value; + } onChanged(); } else { - kpiListBuilder_.addAllMessages(values); + kpiListBuilder_.mergeFrom(value); } + return this; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ public Builder clearKpiList() { if (kpiListBuilder_ == null) { - kpiList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - kpiListBuilder_.clear(); - } - return this; - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public Builder removeKpiList(int index) { - if (kpiListBuilder_ == null) { - ensureKpiListIsMutable(); - kpiList_.remove(index); + kpiList_ = null; onChanged(); } else { - kpiListBuilder_.remove(index); + kpiList_ = null; + kpiListBuilder_ = null; } + return this; } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public monitoring.Monitoring.KpiList.Builder getKpiListBuilder( - int index) { - return getKpiListFieldBuilder().getBuilder(index); - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ - public monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder( - int index) { - if (kpiListBuilder_ == null) { - return kpiList_.get(index); } else { - return kpiListBuilder_.getMessageOrBuilder(index); - } + public monitoring.Monitoring.KpiList.Builder getKpiListBuilder() { + + onChanged(); + return getKpiListFieldBuilder().getBuilder(); } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ - public java.util.List<? extends monitoring.Monitoring.KpiListOrBuilder> - getKpiListOrBuilderList() { + public monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder() { if (kpiListBuilder_ != null) { - return kpiListBuilder_.getMessageOrBuilderList(); + return kpiListBuilder_.getMessageOrBuilder(); } else { - return java.util.Collections.unmodifiableList(kpiList_); + return kpiList_ == null ? 
+ monitoring.Monitoring.KpiList.getDefaultInstance() : kpiList_; } } /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public monitoring.Monitoring.KpiList.Builder addKpiListBuilder() { - return getKpiListFieldBuilder().addBuilder( - monitoring.Monitoring.KpiList.getDefaultInstance()); - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> - */ - public monitoring.Monitoring.KpiList.Builder addKpiListBuilder( - int index) { - return getKpiListFieldBuilder().addBuilder( - index, monitoring.Monitoring.KpiList.getDefaultInstance()); - } - /** - * <code>repeated .monitoring.KpiList kpi_list = 2;</code> + * <code>.monitoring.KpiList kpi_list = 2;</code> */ - public java.util.List<monitoring.Monitoring.KpiList.Builder> - getKpiListBuilderList() { - return getKpiListFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiList, monitoring.Monitoring.KpiList.Builder, monitoring.Monitoring.KpiListOrBuilder> getKpiListFieldBuilder() { if (kpiListBuilder_ == null) { - kpiListBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + kpiListBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiList, monitoring.Monitoring.KpiList.Builder, monitoring.Monitoring.KpiListOrBuilder>( - kpiList_, - ((bitField0_ & 0x00000001) != 0), + getKpiList(), getParentForChildren(), isClean()); kpiList_ = null; @@ -13174,55 +15770,55 @@ public final class Monitoring { } - public interface SubsIDListOrBuilder extends - // @@protoc_insertion_point(interface_extends:monitoring.SubsIDList) + public interface SubsListOrBuilder extends + // @@protoc_insertion_point(interface_extends:monitoring.SubsList) com.google.protobuf.MessageOrBuilder { /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - java.util.List<monitoring.Monitoring.SubscriptionID> - getSubsListList(); + java.util.List<monitoring.Monitoring.SubsDescriptor> + getSubsDescriptorList(); /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - monitoring.Monitoring.SubscriptionID getSubsList(int index); + monitoring.Monitoring.SubsDescriptor getSubsDescriptor(int index); /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - int getSubsListCount(); + int getSubsDescriptorCount(); /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - java.util.List<? extends monitoring.Monitoring.SubscriptionIDOrBuilder> - getSubsListOrBuilderList(); + java.util.List<? 
extends monitoring.Monitoring.SubsDescriptorOrBuilder> + getSubsDescriptorOrBuilderList(); /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - monitoring.Monitoring.SubscriptionIDOrBuilder getSubsListOrBuilder( + monitoring.Monitoring.SubsDescriptorOrBuilder getSubsDescriptorOrBuilder( int index); } /** - * Protobuf type {@code monitoring.SubsIDList} + * Protobuf type {@code monitoring.SubsList} */ - public static final class SubsIDList extends + public static final class SubsList extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:monitoring.SubsIDList) - SubsIDListOrBuilder { + // @@protoc_insertion_point(message_implements:monitoring.SubsList) + SubsListOrBuilder { private static final long serialVersionUID = 0L; - // Use SubsIDList.newBuilder() to construct. - private SubsIDList(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { + // Use SubsList.newBuilder() to construct. + private SubsList(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } - private SubsIDList() { - subsList_ = java.util.Collections.emptyList(); + private SubsList() { + subsDescriptor_ = java.util.Collections.emptyList(); } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance( UnusedPrivateParameter unused) { - return new SubsIDList(); + return new SubsList(); } @java.lang.Override @@ -13230,7 +15826,7 @@ public final class Monitoring { getUnknownFields() { return this.unknownFields; } - private SubsIDList( + private SubsList( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -13251,11 +15847,11 @@ public final class Monitoring { break; case 10: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { - subsList_ = new java.util.ArrayList<monitoring.Monitoring.SubscriptionID>(); + subsDescriptor_ = new java.util.ArrayList<monitoring.Monitoring.SubsDescriptor>(); mutable_bitField0_ |= 0x00000001; } - subsList_.add( - input.readMessage(monitoring.Monitoring.SubscriptionID.parser(), extensionRegistry)); + subsDescriptor_.add( + input.readMessage(monitoring.Monitoring.SubsDescriptor.parser(), extensionRegistry)); break; } default: { @@ -13274,7 +15870,7 @@ public final class Monitoring { e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000001) != 0)) { - subsList_ = java.util.Collections.unmodifiableList(subsList_); + subsDescriptor_ = java.util.Collections.unmodifiableList(subsDescriptor_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -13282,55 +15878,55 @@ public final class Monitoring { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return monitoring.Monitoring.internal_static_monitoring_SubsIDList_descriptor; + return monitoring.Monitoring.internal_static_monitoring_SubsList_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return monitoring.Monitoring.internal_static_monitoring_SubsIDList_fieldAccessorTable + return monitoring.Monitoring.internal_static_monitoring_SubsList_fieldAccessorTable .ensureFieldAccessorsInitialized( - monitoring.Monitoring.SubsIDList.class, monitoring.Monitoring.SubsIDList.Builder.class); + monitoring.Monitoring.SubsList.class, monitoring.Monitoring.SubsList.Builder.class); } - 
public static final int SUBS_LIST_FIELD_NUMBER = 1; - private java.util.List<monitoring.Monitoring.SubscriptionID> subsList_; + public static final int SUBS_DESCRIPTOR_FIELD_NUMBER = 1; + private java.util.List<monitoring.Monitoring.SubsDescriptor> subsDescriptor_; /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ @java.lang.Override - public java.util.List<monitoring.Monitoring.SubscriptionID> getSubsListList() { - return subsList_; + public java.util.List<monitoring.Monitoring.SubsDescriptor> getSubsDescriptorList() { + return subsDescriptor_; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ @java.lang.Override - public java.util.List<? extends monitoring.Monitoring.SubscriptionIDOrBuilder> - getSubsListOrBuilderList() { - return subsList_; + public java.util.List<? extends monitoring.Monitoring.SubsDescriptorOrBuilder> + getSubsDescriptorOrBuilderList() { + return subsDescriptor_; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ @java.lang.Override - public int getSubsListCount() { - return subsList_.size(); + public int getSubsDescriptorCount() { + return subsDescriptor_.size(); } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.SubscriptionID getSubsList(int index) { - return subsList_.get(index); + public monitoring.Monitoring.SubsDescriptor getSubsDescriptor(int index) { + return subsDescriptor_.get(index); } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.SubscriptionIDOrBuilder getSubsListOrBuilder( + public monitoring.Monitoring.SubsDescriptorOrBuilder getSubsDescriptorOrBuilder( int index) { - return subsList_.get(index); + return subsDescriptor_.get(index); } private byte memoizedIsInitialized = -1; @@ -13347,8 +15943,8 @@ public final class Monitoring { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - for (int i = 0; i < subsList_.size(); i++) { - output.writeMessage(1, subsList_.get(i)); + for (int i = 0; i < subsDescriptor_.size(); i++) { + output.writeMessage(1, subsDescriptor_.get(i)); } unknownFields.writeTo(output); } @@ -13359,9 +15955,9 @@ public final class Monitoring { if (size != -1) return size; size = 0; - for (int i = 0; i < subsList_.size(); i++) { + for (int i = 0; i < subsDescriptor_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, subsList_.get(i)); + .computeMessageSize(1, subsDescriptor_.get(i)); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -13373,13 +15969,13 @@ public final class Monitoring { if (obj == this) { return true; } - if (!(obj instanceof monitoring.Monitoring.SubsIDList)) { + if (!(obj instanceof monitoring.Monitoring.SubsList)) { return super.equals(obj); } - monitoring.Monitoring.SubsIDList other = (monitoring.Monitoring.SubsIDList) obj; + monitoring.Monitoring.SubsList other = (monitoring.Monitoring.SubsList) obj; - if (!getSubsListList() - .equals(other.getSubsListList())) return false; + if (!getSubsDescriptorList() + 
.equals(other.getSubsDescriptorList())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -13391,78 +15987,78 @@ public final class Monitoring { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (getSubsListCount() > 0) { - hash = (37 * hash) + SUBS_LIST_FIELD_NUMBER; - hash = (53 * hash) + getSubsListList().hashCode(); + if (getSubsDescriptorCount() > 0) { + hash = (37 * hash) + SUBS_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getSubsDescriptorList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static monitoring.Monitoring.SubsIDList parseFrom(byte[] data) + public static monitoring.Monitoring.SubsList parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static monitoring.Monitoring.SubsIDList parseFrom(java.io.InputStream input) + public static monitoring.Monitoring.SubsList parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } - public static monitoring.Monitoring.SubsIDList parseDelimitedFrom(java.io.InputStream input) + public static monitoring.Monitoring.SubsList parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } - public static monitoring.Monitoring.SubsIDList parseDelimitedFrom( + public static monitoring.Monitoring.SubsList parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 
.parseDelimitedWithIOException(PARSER, input, extensionRegistry); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } - public static monitoring.Monitoring.SubsIDList parseFrom( + public static monitoring.Monitoring.SubsList parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -13475,7 +16071,7 @@ public final class Monitoring { public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(monitoring.Monitoring.SubsIDList prototype) { + public static Builder newBuilder(monitoring.Monitoring.SubsList prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override @@ -13491,26 +16087,26 @@ public final class Monitoring { return builder; } /** - * Protobuf type {@code monitoring.SubsIDList} + * Protobuf type {@code monitoring.SubsList} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements - // @@protoc_insertion_point(builder_implements:monitoring.SubsIDList) - monitoring.Monitoring.SubsIDListOrBuilder { + // @@protoc_insertion_point(builder_implements:monitoring.SubsList) + monitoring.Monitoring.SubsListOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return monitoring.Monitoring.internal_static_monitoring_SubsIDList_descriptor; + return monitoring.Monitoring.internal_static_monitoring_SubsList_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return monitoring.Monitoring.internal_static_monitoring_SubsIDList_fieldAccessorTable + return monitoring.Monitoring.internal_static_monitoring_SubsList_fieldAccessorTable .ensureFieldAccessorsInitialized( - monitoring.Monitoring.SubsIDList.class, monitoring.Monitoring.SubsIDList.Builder.class); + monitoring.Monitoring.SubsList.class, monitoring.Monitoring.SubsList.Builder.class); } - // Construct using monitoring.Monitoring.SubsIDList.newBuilder() + // Construct using monitoring.Monitoring.SubsList.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -13523,17 +16119,17 @@ public final class Monitoring { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getSubsListFieldBuilder(); + getSubsDescriptorFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); - if (subsListBuilder_ == null) { - subsList_ = java.util.Collections.emptyList(); + if (subsDescriptorBuilder_ == null) { + subsDescriptor_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - subsListBuilder_.clear(); + subsDescriptorBuilder_.clear(); } return this; } @@ -13541,17 +16137,17 @@ public final class Monitoring { @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return monitoring.Monitoring.internal_static_monitoring_SubsIDList_descriptor; + return monitoring.Monitoring.internal_static_monitoring_SubsList_descriptor; } @java.lang.Override - public monitoring.Monitoring.SubsIDList getDefaultInstanceForType() { - return monitoring.Monitoring.SubsIDList.getDefaultInstance(); + public monitoring.Monitoring.SubsList 
getDefaultInstanceForType() { + return monitoring.Monitoring.SubsList.getDefaultInstance(); } @java.lang.Override - public monitoring.Monitoring.SubsIDList build() { - monitoring.Monitoring.SubsIDList result = buildPartial(); + public monitoring.Monitoring.SubsList build() { + monitoring.Monitoring.SubsList result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } @@ -13559,17 +16155,17 @@ public final class Monitoring { } @java.lang.Override - public monitoring.Monitoring.SubsIDList buildPartial() { - monitoring.Monitoring.SubsIDList result = new monitoring.Monitoring.SubsIDList(this); + public monitoring.Monitoring.SubsList buildPartial() { + monitoring.Monitoring.SubsList result = new monitoring.Monitoring.SubsList(this); int from_bitField0_ = bitField0_; - if (subsListBuilder_ == null) { + if (subsDescriptorBuilder_ == null) { if (((bitField0_ & 0x00000001) != 0)) { - subsList_ = java.util.Collections.unmodifiableList(subsList_); + subsDescriptor_ = java.util.Collections.unmodifiableList(subsDescriptor_); bitField0_ = (bitField0_ & ~0x00000001); } - result.subsList_ = subsList_; + result.subsDescriptor_ = subsDescriptor_; } else { - result.subsList_ = subsListBuilder_.build(); + result.subsDescriptor_ = subsDescriptorBuilder_.build(); } onBuilt(); return result; @@ -13609,39 +16205,39 @@ public final class Monitoring { } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof monitoring.Monitoring.SubsIDList) { - return mergeFrom((monitoring.Monitoring.SubsIDList)other); + if (other instanceof monitoring.Monitoring.SubsList) { + return mergeFrom((monitoring.Monitoring.SubsList)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(monitoring.Monitoring.SubsIDList other) { - if (other == monitoring.Monitoring.SubsIDList.getDefaultInstance()) return this; - if (subsListBuilder_ == null) { - if (!other.subsList_.isEmpty()) { - if (subsList_.isEmpty()) { - subsList_ = other.subsList_; + public Builder mergeFrom(monitoring.Monitoring.SubsList other) { + if (other == monitoring.Monitoring.SubsList.getDefaultInstance()) return this; + if (subsDescriptorBuilder_ == null) { + if (!other.subsDescriptor_.isEmpty()) { + if (subsDescriptor_.isEmpty()) { + subsDescriptor_ = other.subsDescriptor_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureSubsListIsMutable(); - subsList_.addAll(other.subsList_); + ensureSubsDescriptorIsMutable(); + subsDescriptor_.addAll(other.subsDescriptor_); } onChanged(); } } else { - if (!other.subsList_.isEmpty()) { - if (subsListBuilder_.isEmpty()) { - subsListBuilder_.dispose(); - subsListBuilder_ = null; - subsList_ = other.subsList_; + if (!other.subsDescriptor_.isEmpty()) { + if (subsDescriptorBuilder_.isEmpty()) { + subsDescriptorBuilder_.dispose(); + subsDescriptorBuilder_ = null; + subsDescriptor_ = other.subsDescriptor_; bitField0_ = (bitField0_ & ~0x00000001); - subsListBuilder_ = + subsDescriptorBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
- getSubsListFieldBuilder() : null; + getSubsDescriptorFieldBuilder() : null; } else { - subsListBuilder_.addAllMessages(other.subsList_); + subsDescriptorBuilder_.addAllMessages(other.subsDescriptor_); } } } @@ -13660,11 +16256,11 @@ public final class Monitoring { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - monitoring.Monitoring.SubsIDList parsedMessage = null; + monitoring.Monitoring.SubsList parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (monitoring.Monitoring.SubsIDList) e.getUnfinishedMessage(); + parsedMessage = (monitoring.Monitoring.SubsList) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -13675,244 +16271,244 @@ public final class Monitoring { } private int bitField0_; - private java.util.List<monitoring.Monitoring.SubscriptionID> subsList_ = + private java.util.List<monitoring.Monitoring.SubsDescriptor> subsDescriptor_ = java.util.Collections.emptyList(); - private void ensureSubsListIsMutable() { + private void ensureSubsDescriptorIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { - subsList_ = new java.util.ArrayList<monitoring.Monitoring.SubscriptionID>(subsList_); + subsDescriptor_ = new java.util.ArrayList<monitoring.Monitoring.SubsDescriptor>(subsDescriptor_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilderV3< - monitoring.Monitoring.SubscriptionID, monitoring.Monitoring.SubscriptionID.Builder, monitoring.Monitoring.SubscriptionIDOrBuilder> subsListBuilder_; + monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.SubsDescriptor.Builder, monitoring.Monitoring.SubsDescriptorOrBuilder> subsDescriptorBuilder_; /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public java.util.List<monitoring.Monitoring.SubscriptionID> getSubsListList() { - if (subsListBuilder_ == null) { - return java.util.Collections.unmodifiableList(subsList_); + public java.util.List<monitoring.Monitoring.SubsDescriptor> getSubsDescriptorList() { + if (subsDescriptorBuilder_ == null) { + return java.util.Collections.unmodifiableList(subsDescriptor_); } else { - return subsListBuilder_.getMessageList(); + return subsDescriptorBuilder_.getMessageList(); } } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public int getSubsListCount() { - if (subsListBuilder_ == null) { - return subsList_.size(); + public int getSubsDescriptorCount() { + if (subsDescriptorBuilder_ == null) { + return subsDescriptor_.size(); } else { - return subsListBuilder_.getCount(); + return subsDescriptorBuilder_.getCount(); } } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public monitoring.Monitoring.SubscriptionID getSubsList(int index) { - if (subsListBuilder_ == null) { - return subsList_.get(index); + public monitoring.Monitoring.SubsDescriptor getSubsDescriptor(int index) { + if (subsDescriptorBuilder_ == null) { + return subsDescriptor_.get(index); } else { - return subsListBuilder_.getMessage(index); + return subsDescriptorBuilder_.getMessage(index); } } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 
1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder setSubsList( - int index, monitoring.Monitoring.SubscriptionID value) { - if (subsListBuilder_ == null) { + public Builder setSubsDescriptor( + int index, monitoring.Monitoring.SubsDescriptor value) { + if (subsDescriptorBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureSubsListIsMutable(); - subsList_.set(index, value); + ensureSubsDescriptorIsMutable(); + subsDescriptor_.set(index, value); onChanged(); } else { - subsListBuilder_.setMessage(index, value); + subsDescriptorBuilder_.setMessage(index, value); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder setSubsList( - int index, monitoring.Monitoring.SubscriptionID.Builder builderForValue) { - if (subsListBuilder_ == null) { - ensureSubsListIsMutable(); - subsList_.set(index, builderForValue.build()); + public Builder setSubsDescriptor( + int index, monitoring.Monitoring.SubsDescriptor.Builder builderForValue) { + if (subsDescriptorBuilder_ == null) { + ensureSubsDescriptorIsMutable(); + subsDescriptor_.set(index, builderForValue.build()); onChanged(); } else { - subsListBuilder_.setMessage(index, builderForValue.build()); + subsDescriptorBuilder_.setMessage(index, builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder addSubsList(monitoring.Monitoring.SubscriptionID value) { - if (subsListBuilder_ == null) { + public Builder addSubsDescriptor(monitoring.Monitoring.SubsDescriptor value) { + if (subsDescriptorBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureSubsListIsMutable(); - subsList_.add(value); + ensureSubsDescriptorIsMutable(); + subsDescriptor_.add(value); onChanged(); } else { - subsListBuilder_.addMessage(value); + subsDescriptorBuilder_.addMessage(value); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder addSubsList( - int index, monitoring.Monitoring.SubscriptionID value) { - if (subsListBuilder_ == null) { + public Builder addSubsDescriptor( + int index, monitoring.Monitoring.SubsDescriptor value) { + if (subsDescriptorBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureSubsListIsMutable(); - subsList_.add(index, value); + ensureSubsDescriptorIsMutable(); + subsDescriptor_.add(index, value); onChanged(); } else { - subsListBuilder_.addMessage(index, value); + subsDescriptorBuilder_.addMessage(index, value); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder addSubsList( - monitoring.Monitoring.SubscriptionID.Builder builderForValue) { - if (subsListBuilder_ == null) { - ensureSubsListIsMutable(); - subsList_.add(builderForValue.build()); + public Builder addSubsDescriptor( + monitoring.Monitoring.SubsDescriptor.Builder builderForValue) { + if (subsDescriptorBuilder_ == null) { + ensureSubsDescriptorIsMutable(); + subsDescriptor_.add(builderForValue.build()); onChanged(); } else { - subsListBuilder_.addMessage(builderForValue.build()); + 
subsDescriptorBuilder_.addMessage(builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder addSubsList( - int index, monitoring.Monitoring.SubscriptionID.Builder builderForValue) { - if (subsListBuilder_ == null) { - ensureSubsListIsMutable(); - subsList_.add(index, builderForValue.build()); + public Builder addSubsDescriptor( + int index, monitoring.Monitoring.SubsDescriptor.Builder builderForValue) { + if (subsDescriptorBuilder_ == null) { + ensureSubsDescriptorIsMutable(); + subsDescriptor_.add(index, builderForValue.build()); onChanged(); } else { - subsListBuilder_.addMessage(index, builderForValue.build()); + subsDescriptorBuilder_.addMessage(index, builderForValue.build()); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder addAllSubsList( - java.lang.Iterable<? extends monitoring.Monitoring.SubscriptionID> values) { - if (subsListBuilder_ == null) { - ensureSubsListIsMutable(); + public Builder addAllSubsDescriptor( + java.lang.Iterable<? extends monitoring.Monitoring.SubsDescriptor> values) { + if (subsDescriptorBuilder_ == null) { + ensureSubsDescriptorIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, subsList_); + values, subsDescriptor_); onChanged(); } else { - subsListBuilder_.addAllMessages(values); + subsDescriptorBuilder_.addAllMessages(values); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder clearSubsList() { - if (subsListBuilder_ == null) { - subsList_ = java.util.Collections.emptyList(); + public Builder clearSubsDescriptor() { + if (subsDescriptorBuilder_ == null) { + subsDescriptor_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - subsListBuilder_.clear(); + subsDescriptorBuilder_.clear(); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public Builder removeSubsList(int index) { - if (subsListBuilder_ == null) { - ensureSubsListIsMutable(); - subsList_.remove(index); + public Builder removeSubsDescriptor(int index) { + if (subsDescriptorBuilder_ == null) { + ensureSubsDescriptorIsMutable(); + subsDescriptor_.remove(index); onChanged(); } else { - subsListBuilder_.remove(index); + subsDescriptorBuilder_.remove(index); } return this; } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public monitoring.Monitoring.SubscriptionID.Builder getSubsListBuilder( + public monitoring.Monitoring.SubsDescriptor.Builder getSubsDescriptorBuilder( int index) { - return getSubsListFieldBuilder().getBuilder(index); + return getSubsDescriptorFieldBuilder().getBuilder(index); } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public monitoring.Monitoring.SubscriptionIDOrBuilder getSubsListOrBuilder( + public monitoring.Monitoring.SubsDescriptorOrBuilder getSubsDescriptorOrBuilder( int index) { - if (subsListBuilder_ == null) { - return subsList_.get(index); } else { - return 
subsListBuilder_.getMessageOrBuilder(index); + if (subsDescriptorBuilder_ == null) { + return subsDescriptor_.get(index); } else { + return subsDescriptorBuilder_.getMessageOrBuilder(index); } } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public java.util.List<? extends monitoring.Monitoring.SubscriptionIDOrBuilder> - getSubsListOrBuilderList() { - if (subsListBuilder_ != null) { - return subsListBuilder_.getMessageOrBuilderList(); + public java.util.List<? extends monitoring.Monitoring.SubsDescriptorOrBuilder> + getSubsDescriptorOrBuilderList() { + if (subsDescriptorBuilder_ != null) { + return subsDescriptorBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(subsList_); + return java.util.Collections.unmodifiableList(subsDescriptor_); } } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public monitoring.Monitoring.SubscriptionID.Builder addSubsListBuilder() { - return getSubsListFieldBuilder().addBuilder( - monitoring.Monitoring.SubscriptionID.getDefaultInstance()); + public monitoring.Monitoring.SubsDescriptor.Builder addSubsDescriptorBuilder() { + return getSubsDescriptorFieldBuilder().addBuilder( + monitoring.Monitoring.SubsDescriptor.getDefaultInstance()); } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public monitoring.Monitoring.SubscriptionID.Builder addSubsListBuilder( + public monitoring.Monitoring.SubsDescriptor.Builder addSubsDescriptorBuilder( int index) { - return getSubsListFieldBuilder().addBuilder( - index, monitoring.Monitoring.SubscriptionID.getDefaultInstance()); + return getSubsDescriptorFieldBuilder().addBuilder( + index, monitoring.Monitoring.SubsDescriptor.getDefaultInstance()); } /** - * <code>repeated .monitoring.SubscriptionID subs_list = 1;</code> + * <code>repeated .monitoring.SubsDescriptor subs_descriptor = 1;</code> */ - public java.util.List<monitoring.Monitoring.SubscriptionID.Builder> - getSubsListBuilderList() { - return getSubsListFieldBuilder().getBuilderList(); + public java.util.List<monitoring.Monitoring.SubsDescriptor.Builder> + getSubsDescriptorBuilderList() { + return getSubsDescriptorFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - monitoring.Monitoring.SubscriptionID, monitoring.Monitoring.SubscriptionID.Builder, monitoring.Monitoring.SubscriptionIDOrBuilder> - getSubsListFieldBuilder() { - if (subsListBuilder_ == null) { - subsListBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - monitoring.Monitoring.SubscriptionID, monitoring.Monitoring.SubscriptionID.Builder, monitoring.Monitoring.SubscriptionIDOrBuilder>( - subsList_, + monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.SubsDescriptor.Builder, monitoring.Monitoring.SubsDescriptorOrBuilder> + getSubsDescriptorFieldBuilder() { + if (subsDescriptorBuilder_ == null) { + subsDescriptorBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.SubsDescriptor.Builder, monitoring.Monitoring.SubsDescriptorOrBuilder>( + subsDescriptor_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); - subsList_ = null; + subsDescriptor_ = null; } - return subsListBuilder_; + return subsDescriptorBuilder_; } 
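
The hunks above and below capture three related proto changes: KpiList's repeated field is renamed from kpi_list to kpi, SubsResponse.kpi_list collapses from a repeated KpiList into a single embedded KpiList, and SubsIDList becomes SubsList carrying full SubsDescriptor entries instead of bare SubscriptionIDs. A minimal Java sketch of how calling code adapts, using only accessors visible in this diff and leaving message contents at their defaults (SubsDescriptor's own fields are not shown here), would look like:

    import monitoring.Monitoring;

    public final class MonitoringApiMigrationSketch {
        public static void main(String[] args) {
            // SubsIDList -> SubsList: addSubsList(SubscriptionID) becomes
            // addSubsDescriptor(SubsDescriptor), and getSubsListList() becomes
            // getSubsDescriptorList().
            Monitoring.SubsList subsList = Monitoring.SubsList.newBuilder()
                .addSubsDescriptor(Monitoring.SubsDescriptor.getDefaultInstance())
                .build();
            for (Monitoring.SubsDescriptor sd : subsList.getSubsDescriptorList()) {
                System.out.println(sd);
            }

            // SubsResponse: the repeated kpi_list becomes a singular message, so the
            // list-style accessors (addKpiList, getKpiList(int), getKpiListCount) are
            // replaced by setKpiList/hasKpiList/getKpiList; inside KpiList the
            // elements are now reached through addKpi/getKpi/getKpiCount.
            Monitoring.SubsResponse response = Monitoring.SubsResponse.newBuilder()
                .setKpiList(Monitoring.KpiList.newBuilder()
                    .addKpi(Monitoring.Kpi.getDefaultInstance()))
                .build();
            if (response.hasKpiList()) {
                System.out.println("KPIs: " + response.getKpiList().getKpiCount());
            }
        }
    }

The AlarmDescriptor hunks that follow apply the same repeated-to-singular pattern to kpi_id and kpi_value_range, swapping the indexed getters for hasKpiId()/getKpiId() and hasKpiValueRange()/getKpiValueRange().
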
@java.lang.Override public final Builder setUnknownFields( @@ -13927,41 +16523,41 @@ public final class Monitoring { } - // @@protoc_insertion_point(builder_scope:monitoring.SubsIDList) + // @@protoc_insertion_point(builder_scope:monitoring.SubsList) } - // @@protoc_insertion_point(class_scope:monitoring.SubsIDList) - private static final monitoring.Monitoring.SubsIDList DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:monitoring.SubsList) + private static final monitoring.Monitoring.SubsList DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new monitoring.Monitoring.SubsIDList(); + DEFAULT_INSTANCE = new monitoring.Monitoring.SubsList(); } - public static monitoring.Monitoring.SubsIDList getDefaultInstance() { + public static monitoring.Monitoring.SubsList getDefaultInstance() { return DEFAULT_INSTANCE; } - private static final com.google.protobuf.Parser<SubsIDList> - PARSER = new com.google.protobuf.AbstractParser<SubsIDList>() { + private static final com.google.protobuf.Parser<SubsList> + PARSER = new com.google.protobuf.AbstractParser<SubsList>() { @java.lang.Override - public SubsIDList parsePartialFrom( + public SubsList parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SubsIDList(input, extensionRegistry); + return new SubsList(input, extensionRegistry); } }; - public static com.google.protobuf.Parser<SubsIDList> parser() { + public static com.google.protobuf.Parser<SubsList> parser() { return PARSER; } @java.lang.Override - public com.google.protobuf.Parser<SubsIDList> getParserForType() { + public com.google.protobuf.Parser<SubsList> getParserForType() { return PARSER; } @java.lang.Override - public monitoring.Monitoring.SubsIDList getDefaultInstanceForType() { + public monitoring.Monitoring.SubsList getDefaultInstanceForType() { return DEFAULT_INSTANCE; } @@ -14011,52 +16607,34 @@ public final class Monitoring { getNameBytes(); /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - java.util.List<monitoring.Monitoring.KpiId> - getKpiIdList(); - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - monitoring.Monitoring.KpiId getKpiId(int index); - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> + * @return Whether the kpiId field is set. */ - int getKpiIdCount(); + boolean hasKpiId(); /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> + * @return The kpiId. */ - java.util.List<? extends monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdOrBuilderList(); + monitoring.Monitoring.KpiId getKpiId(); /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ - monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder( - int index); + monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder(); /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - java.util.List<monitoring.Monitoring.KpiValueRange> - getKpiValueRangeList(); - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> + * @return Whether the kpiValueRange field is set. 
*/ - monitoring.Monitoring.KpiValueRange getKpiValueRange(int index); + boolean hasKpiValueRange(); /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> + * @return The kpiValueRange. */ - int getKpiValueRangeCount(); + monitoring.Monitoring.KpiValueRange getKpiValueRange(); /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - java.util.List<? extends monitoring.Monitoring.KpiValueRangeOrBuilder> - getKpiValueRangeOrBuilderList(); - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - monitoring.Monitoring.KpiValueRangeOrBuilder getKpiValueRangeOrBuilder( - int index); + monitoring.Monitoring.KpiValueRangeOrBuilder getKpiValueRangeOrBuilder(); /** * <code>.context.Timestamp timestamp = 6;</code> @@ -14088,8 +16666,6 @@ public final class Monitoring { private AlarmDescriptor() { alarmDescription_ = ""; name_ = ""; - kpiId_ = java.util.Collections.emptyList(); - kpiValueRange_ = java.util.Collections.emptyList(); } @java.lang.Override @@ -14112,7 +16688,6 @@ public final class Monitoring { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } - int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -14149,21 +16724,29 @@ public final class Monitoring { break; } case 34: { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - kpiId_ = new java.util.ArrayList<monitoring.Monitoring.KpiId>(); - mutable_bitField0_ |= 0x00000001; + monitoring.Monitoring.KpiId.Builder subBuilder = null; + if (kpiId_ != null) { + subBuilder = kpiId_.toBuilder(); } - kpiId_.add( - input.readMessage(monitoring.Monitoring.KpiId.parser(), extensionRegistry)); + kpiId_ = input.readMessage(monitoring.Monitoring.KpiId.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(kpiId_); + kpiId_ = subBuilder.buildPartial(); + } + break; } case 42: { - if (!((mutable_bitField0_ & 0x00000002) != 0)) { - kpiValueRange_ = new java.util.ArrayList<monitoring.Monitoring.KpiValueRange>(); - mutable_bitField0_ |= 0x00000002; + monitoring.Monitoring.KpiValueRange.Builder subBuilder = null; + if (kpiValueRange_ != null) { + subBuilder = kpiValueRange_.toBuilder(); } - kpiValueRange_.add( - input.readMessage(monitoring.Monitoring.KpiValueRange.parser(), extensionRegistry)); + kpiValueRange_ = input.readMessage(monitoring.Monitoring.KpiValueRange.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(kpiValueRange_); + kpiValueRange_ = subBuilder.buildPartial(); + } + break; } case 50: { @@ -14194,12 +16777,6 @@ public final class Monitoring { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { - kpiId_ = java.util.Collections.unmodifiableList(kpiId_); - } - if (((mutable_bitField0_ & 0x00000002) != 0)) { - kpiValueRange_ = java.util.Collections.unmodifiableList(kpiValueRange_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -14319,84 +16896,56 @@ public final class Monitoring { } } - public static final int KPI_ID_FIELD_NUMBER = 4; - private java.util.List<monitoring.Monitoring.KpiId> kpiId_; - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - @java.lang.Override - public java.util.List<monitoring.Monitoring.KpiId> 
getKpiIdList() { - return kpiId_; - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - @java.lang.Override - public java.util.List<? extends monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdOrBuilderList() { - return kpiId_; - } + public static final int KPI_ID_FIELD_NUMBER = 4; + private monitoring.Monitoring.KpiId kpiId_; /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> + * @return Whether the kpiId field is set. */ @java.lang.Override - public int getKpiIdCount() { - return kpiId_.size(); + public boolean hasKpiId() { + return kpiId_ != null; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> + * @return The kpiId. */ @java.lang.Override - public monitoring.Monitoring.KpiId getKpiId(int index) { - return kpiId_.get(index); + public monitoring.Monitoring.KpiId getKpiId() { + return kpiId_ == null ? monitoring.Monitoring.KpiId.getDefaultInstance() : kpiId_; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ @java.lang.Override - public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder( - int index) { - return kpiId_.get(index); + public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder() { + return getKpiId(); } public static final int KPI_VALUE_RANGE_FIELD_NUMBER = 5; - private java.util.List<monitoring.Monitoring.KpiValueRange> kpiValueRange_; - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - @java.lang.Override - public java.util.List<monitoring.Monitoring.KpiValueRange> getKpiValueRangeList() { - return kpiValueRange_; - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - @java.lang.Override - public java.util.List<? extends monitoring.Monitoring.KpiValueRangeOrBuilder> - getKpiValueRangeOrBuilderList() { - return kpiValueRange_; - } + private monitoring.Monitoring.KpiValueRange kpiValueRange_; /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> + * @return Whether the kpiValueRange field is set. */ @java.lang.Override - public int getKpiValueRangeCount() { - return kpiValueRange_.size(); + public boolean hasKpiValueRange() { + return kpiValueRange_ != null; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> + * @return The kpiValueRange. */ @java.lang.Override - public monitoring.Monitoring.KpiValueRange getKpiValueRange(int index) { - return kpiValueRange_.get(index); + public monitoring.Monitoring.KpiValueRange getKpiValueRange() { + return kpiValueRange_ == null ? 
monitoring.Monitoring.KpiValueRange.getDefaultInstance() : kpiValueRange_; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ @java.lang.Override - public monitoring.Monitoring.KpiValueRangeOrBuilder getKpiValueRangeOrBuilder( - int index) { - return kpiValueRange_.get(index); + public monitoring.Monitoring.KpiValueRangeOrBuilder getKpiValueRangeOrBuilder() { + return getKpiValueRange(); } public static final int TIMESTAMP_FIELD_NUMBER = 6; @@ -14448,11 +16997,11 @@ public final class Monitoring { if (!getNameBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, name_); } - for (int i = 0; i < kpiId_.size(); i++) { - output.writeMessage(4, kpiId_.get(i)); + if (kpiId_ != null) { + output.writeMessage(4, getKpiId()); } - for (int i = 0; i < kpiValueRange_.size(); i++) { - output.writeMessage(5, kpiValueRange_.get(i)); + if (kpiValueRange_ != null) { + output.writeMessage(5, getKpiValueRange()); } if (timestamp_ != null) { output.writeMessage(6, getTimestamp()); @@ -14476,13 +17025,13 @@ public final class Monitoring { if (!getNameBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, name_); } - for (int i = 0; i < kpiId_.size(); i++) { + if (kpiId_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, kpiId_.get(i)); + .computeMessageSize(4, getKpiId()); } - for (int i = 0; i < kpiValueRange_.size(); i++) { + if (kpiValueRange_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(5, kpiValueRange_.get(i)); + .computeMessageSize(5, getKpiValueRange()); } if (timestamp_ != null) { size += com.google.protobuf.CodedOutputStream @@ -14512,10 +17061,16 @@ public final class Monitoring { .equals(other.getAlarmDescription())) return false; if (!getName() .equals(other.getName())) return false; - if (!getKpiIdList() - .equals(other.getKpiIdList())) return false; - if (!getKpiValueRangeList() - .equals(other.getKpiValueRangeList())) return false; + if (hasKpiId() != other.hasKpiId()) return false; + if (hasKpiId()) { + if (!getKpiId() + .equals(other.getKpiId())) return false; + } + if (hasKpiValueRange() != other.hasKpiValueRange()) return false; + if (hasKpiValueRange()) { + if (!getKpiValueRange() + .equals(other.getKpiValueRange())) return false; + } if (hasTimestamp() != other.hasTimestamp()) return false; if (hasTimestamp()) { if (!getTimestamp() @@ -14540,13 +17095,13 @@ public final class Monitoring { hash = (53 * hash) + getAlarmDescription().hashCode(); hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); - if (getKpiIdCount() > 0) { + if (hasKpiId()) { hash = (37 * hash) + KPI_ID_FIELD_NUMBER; - hash = (53 * hash) + getKpiIdList().hashCode(); + hash = (53 * hash) + getKpiId().hashCode(); } - if (getKpiValueRangeCount() > 0) { + if (hasKpiValueRange()) { hash = (37 * hash) + KPI_VALUE_RANGE_FIELD_NUMBER; - hash = (53 * hash) + getKpiValueRangeList().hashCode(); + hash = (53 * hash) + getKpiValueRange().hashCode(); } if (hasTimestamp()) { hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; @@ -14680,8 +17235,6 @@ public final class Monitoring { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { - getKpiIdFieldBuilder(); - getKpiValueRangeFieldBuilder(); } } @java.lang.Override @@ -14698,16 +17251,16 @@ public final class Monitoring { name_ = ""; if (kpiIdBuilder_ == null) { - kpiId_ = 
java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); + kpiId_ = null; } else { - kpiIdBuilder_.clear(); + kpiId_ = null; + kpiIdBuilder_ = null; } if (kpiValueRangeBuilder_ == null) { - kpiValueRange_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); + kpiValueRange_ = null; } else { - kpiValueRangeBuilder_.clear(); + kpiValueRange_ = null; + kpiValueRangeBuilder_ = null; } if (timestampBuilder_ == null) { timestamp_ = null; @@ -14741,7 +17294,6 @@ public final class Monitoring { @java.lang.Override public monitoring.Monitoring.AlarmDescriptor buildPartial() { monitoring.Monitoring.AlarmDescriptor result = new monitoring.Monitoring.AlarmDescriptor(this); - int from_bitField0_ = bitField0_; if (alarmIdBuilder_ == null) { result.alarmId_ = alarmId_; } else { @@ -14750,19 +17302,11 @@ public final class Monitoring { result.alarmDescription_ = alarmDescription_; result.name_ = name_; if (kpiIdBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0)) { - kpiId_ = java.util.Collections.unmodifiableList(kpiId_); - bitField0_ = (bitField0_ & ~0x00000001); - } result.kpiId_ = kpiId_; } else { result.kpiId_ = kpiIdBuilder_.build(); } if (kpiValueRangeBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0)) { - kpiValueRange_ = java.util.Collections.unmodifiableList(kpiValueRange_); - bitField0_ = (bitField0_ & ~0x00000002); - } result.kpiValueRange_ = kpiValueRange_; } else { result.kpiValueRange_ = kpiValueRangeBuilder_.build(); @@ -14831,57 +17375,11 @@ public final class Monitoring { name_ = other.name_; onChanged(); } - if (kpiIdBuilder_ == null) { - if (!other.kpiId_.isEmpty()) { - if (kpiId_.isEmpty()) { - kpiId_ = other.kpiId_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureKpiIdIsMutable(); - kpiId_.addAll(other.kpiId_); - } - onChanged(); - } - } else { - if (!other.kpiId_.isEmpty()) { - if (kpiIdBuilder_.isEmpty()) { - kpiIdBuilder_.dispose(); - kpiIdBuilder_ = null; - kpiId_ = other.kpiId_; - bitField0_ = (bitField0_ & ~0x00000001); - kpiIdBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? - getKpiIdFieldBuilder() : null; - } else { - kpiIdBuilder_.addAllMessages(other.kpiId_); - } - } + if (other.hasKpiId()) { + mergeKpiId(other.getKpiId()); } - if (kpiValueRangeBuilder_ == null) { - if (!other.kpiValueRange_.isEmpty()) { - if (kpiValueRange_.isEmpty()) { - kpiValueRange_ = other.kpiValueRange_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureKpiValueRangeIsMutable(); - kpiValueRange_.addAll(other.kpiValueRange_); - } - onChanged(); - } - } else { - if (!other.kpiValueRange_.isEmpty()) { - if (kpiValueRangeBuilder_.isEmpty()) { - kpiValueRangeBuilder_.dispose(); - kpiValueRangeBuilder_ = null; - kpiValueRange_ = other.kpiValueRange_; - bitField0_ = (bitField0_ & ~0x00000002); - kpiValueRangeBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
- getKpiValueRangeFieldBuilder() : null; - } else { - kpiValueRangeBuilder_.addAllMessages(other.kpiValueRange_); - } - } + if (other.hasKpiValueRange()) { + mergeKpiValueRange(other.getKpiValueRange()); } if (other.hasTimestamp()) { mergeTimestamp(other.getTimestamp()); @@ -14914,7 +17412,6 @@ public final class Monitoring { } return this; } - private int bitField0_; private monitoring.Monitoring.AlarmID alarmId_; private com.google.protobuf.SingleFieldBuilderV3< @@ -15187,239 +17684,118 @@ public final class Monitoring { return this; } - private java.util.List<monitoring.Monitoring.KpiId> kpiId_ = - java.util.Collections.emptyList(); - private void ensureKpiIdIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - kpiId_ = new java.util.ArrayList<monitoring.Monitoring.KpiId>(kpiId_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< + private monitoring.Monitoring.KpiId kpiId_; + private com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> kpiIdBuilder_; - - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public java.util.List<monitoring.Monitoring.KpiId> getKpiIdList() { - if (kpiIdBuilder_ == null) { - return java.util.Collections.unmodifiableList(kpiId_); - } else { - return kpiIdBuilder_.getMessageList(); - } - } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> + * @return Whether the kpiId field is set. */ - public int getKpiIdCount() { - if (kpiIdBuilder_ == null) { - return kpiId_.size(); - } else { - return kpiIdBuilder_.getCount(); - } + public boolean hasKpiId() { + return kpiIdBuilder_ != null || kpiId_ != null; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> + * @return The kpiId. */ - public monitoring.Monitoring.KpiId getKpiId(int index) { + public monitoring.Monitoring.KpiId getKpiId() { if (kpiIdBuilder_ == null) { - return kpiId_.get(index); + return kpiId_ == null ? 
monitoring.Monitoring.KpiId.getDefaultInstance() : kpiId_; } else { - return kpiIdBuilder_.getMessage(index); + return kpiIdBuilder_.getMessage(); } } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ - public Builder setKpiId( - int index, monitoring.Monitoring.KpiId value) { + public Builder setKpiId(monitoring.Monitoring.KpiId value) { if (kpiIdBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureKpiIdIsMutable(); - kpiId_.set(index, value); + kpiId_ = value; onChanged(); } else { - kpiIdBuilder_.setMessage(index, value); + kpiIdBuilder_.setMessage(value); } + return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ public Builder setKpiId( - int index, monitoring.Monitoring.KpiId.Builder builderForValue) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.set(index, builderForValue.build()); - onChanged(); - } else { - kpiIdBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public Builder addKpiId(monitoring.Monitoring.KpiId value) { - if (kpiIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiIdIsMutable(); - kpiId_.add(value); - onChanged(); - } else { - kpiIdBuilder_.addMessage(value); - } - return this; - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public Builder addKpiId( - int index, monitoring.Monitoring.KpiId value) { - if (kpiIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiIdIsMutable(); - kpiId_.add(index, value); - onChanged(); - } else { - kpiIdBuilder_.addMessage(index, value); - } - return this; - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public Builder addKpiId( monitoring.Monitoring.KpiId.Builder builderForValue) { if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.add(builderForValue.build()); - onChanged(); - } else { - kpiIdBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public Builder addKpiId( - int index, monitoring.Monitoring.KpiId.Builder builderForValue) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.add(index, builderForValue.build()); + kpiId_ = builderForValue.build(); onChanged(); } else { - kpiIdBuilder_.addMessage(index, builderForValue.build()); + kpiIdBuilder_.setMessage(builderForValue.build()); } + return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ - public Builder addAllKpiId( - java.lang.Iterable<? 
extends monitoring.Monitoring.KpiId> values) { + public Builder mergeKpiId(monitoring.Monitoring.KpiId value) { if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, kpiId_); + if (kpiId_ != null) { + kpiId_ = + monitoring.Monitoring.KpiId.newBuilder(kpiId_).mergeFrom(value).buildPartial(); + } else { + kpiId_ = value; + } onChanged(); } else { - kpiIdBuilder_.addAllMessages(values); + kpiIdBuilder_.mergeFrom(value); } + return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ public Builder clearKpiId() { if (kpiIdBuilder_ == null) { - kpiId_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - kpiIdBuilder_.clear(); - } - return this; - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public Builder removeKpiId(int index) { - if (kpiIdBuilder_ == null) { - ensureKpiIdIsMutable(); - kpiId_.remove(index); + kpiId_ = null; onChanged(); } else { - kpiIdBuilder_.remove(index); + kpiId_ = null; + kpiIdBuilder_ = null; } + return this; } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public monitoring.Monitoring.KpiId.Builder getKpiIdBuilder( - int index) { - return getKpiIdFieldBuilder().getBuilder(index); - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ - public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder( - int index) { - if (kpiIdBuilder_ == null) { - return kpiId_.get(index); } else { - return kpiIdBuilder_.getMessageOrBuilder(index); - } + public monitoring.Monitoring.KpiId.Builder getKpiIdBuilder() { + + onChanged(); + return getKpiIdFieldBuilder().getBuilder(); } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ - public java.util.List<? extends monitoring.Monitoring.KpiIdOrBuilder> - getKpiIdOrBuilderList() { + public monitoring.Monitoring.KpiIdOrBuilder getKpiIdOrBuilder() { if (kpiIdBuilder_ != null) { - return kpiIdBuilder_.getMessageOrBuilderList(); + return kpiIdBuilder_.getMessageOrBuilder(); } else { - return java.util.Collections.unmodifiableList(kpiId_); + return kpiId_ == null ? 
+ monitoring.Monitoring.KpiId.getDefaultInstance() : kpiId_; } } /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public monitoring.Monitoring.KpiId.Builder addKpiIdBuilder() { - return getKpiIdFieldBuilder().addBuilder( - monitoring.Monitoring.KpiId.getDefaultInstance()); - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> - */ - public monitoring.Monitoring.KpiId.Builder addKpiIdBuilder( - int index) { - return getKpiIdFieldBuilder().addBuilder( - index, monitoring.Monitoring.KpiId.getDefaultInstance()); - } - /** - * <code>repeated .monitoring.KpiId kpi_id = 4;</code> + * <code>.monitoring.KpiId kpi_id = 4;</code> */ - public java.util.List<monitoring.Monitoring.KpiId.Builder> - getKpiIdBuilderList() { - return getKpiIdFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder> getKpiIdFieldBuilder() { if (kpiIdBuilder_ == null) { - kpiIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + kpiIdBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiId.Builder, monitoring.Monitoring.KpiIdOrBuilder>( - kpiId_, - ((bitField0_ & 0x00000001) != 0), + getKpiId(), getParentForChildren(), isClean()); kpiId_ = null; @@ -15427,239 +17803,118 @@ public final class Monitoring { return kpiIdBuilder_; } - private java.util.List<monitoring.Monitoring.KpiValueRange> kpiValueRange_ = - java.util.Collections.emptyList(); - private void ensureKpiValueRangeIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { - kpiValueRange_ = new java.util.ArrayList<monitoring.Monitoring.KpiValueRange>(kpiValueRange_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - monitoring.Monitoring.KpiValueRange, monitoring.Monitoring.KpiValueRange.Builder, monitoring.Monitoring.KpiValueRangeOrBuilder> kpiValueRangeBuilder_; - - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public java.util.List<monitoring.Monitoring.KpiValueRange> getKpiValueRangeList() { - if (kpiValueRangeBuilder_ == null) { - return java.util.Collections.unmodifiableList(kpiValueRange_); - } else { - return kpiValueRangeBuilder_.getMessageList(); - } - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public int getKpiValueRangeCount() { - if (kpiValueRangeBuilder_ == null) { - return kpiValueRange_.size(); - } else { - return kpiValueRangeBuilder_.getCount(); - } - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public monitoring.Monitoring.KpiValueRange getKpiValueRange(int index) { - if (kpiValueRangeBuilder_ == null) { - return kpiValueRange_.get(index); - } else { - return kpiValueRangeBuilder_.getMessage(index); - } - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public Builder setKpiValueRange( - int index, monitoring.Monitoring.KpiValueRange value) { - if (kpiValueRangeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiValueRangeIsMutable(); - kpiValueRange_.set(index, value); - onChanged(); - } else { - kpiValueRangeBuilder_.setMessage(index, value); - } - return this; - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public Builder setKpiValueRange( - int index, 
monitoring.Monitoring.KpiValueRange.Builder builderForValue) { - if (kpiValueRangeBuilder_ == null) { - ensureKpiValueRangeIsMutable(); - kpiValueRange_.set(index, builderForValue.build()); - onChanged(); - } else { - kpiValueRangeBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public Builder addKpiValueRange(monitoring.Monitoring.KpiValueRange value) { - if (kpiValueRangeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiValueRangeIsMutable(); - kpiValueRange_.add(value); - onChanged(); - } else { - kpiValueRangeBuilder_.addMessage(value); - } - return this; - } + private monitoring.Monitoring.KpiValueRange kpiValueRange_; + private com.google.protobuf.SingleFieldBuilderV3< + monitoring.Monitoring.KpiValueRange, monitoring.Monitoring.KpiValueRange.Builder, monitoring.Monitoring.KpiValueRangeOrBuilder> kpiValueRangeBuilder_; /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> + * @return Whether the kpiValueRange field is set. */ - public Builder addKpiValueRange( - int index, monitoring.Monitoring.KpiValueRange value) { - if (kpiValueRangeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureKpiValueRangeIsMutable(); - kpiValueRange_.add(index, value); - onChanged(); - } else { - kpiValueRangeBuilder_.addMessage(index, value); - } - return this; + public boolean hasKpiValueRange() { + return kpiValueRangeBuilder_ != null || kpiValueRange_ != null; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> + * @return The kpiValueRange. */ - public Builder addKpiValueRange( - monitoring.Monitoring.KpiValueRange.Builder builderForValue) { + public monitoring.Monitoring.KpiValueRange getKpiValueRange() { if (kpiValueRangeBuilder_ == null) { - ensureKpiValueRangeIsMutable(); - kpiValueRange_.add(builderForValue.build()); - onChanged(); + return kpiValueRange_ == null ? monitoring.Monitoring.KpiValueRange.getDefaultInstance() : kpiValueRange_; } else { - kpiValueRangeBuilder_.addMessage(builderForValue.build()); + return kpiValueRangeBuilder_.getMessage(); } - return this; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public Builder addKpiValueRange( - int index, monitoring.Monitoring.KpiValueRange.Builder builderForValue) { + public Builder setKpiValueRange(monitoring.Monitoring.KpiValueRange value) { if (kpiValueRangeBuilder_ == null) { - ensureKpiValueRangeIsMutable(); - kpiValueRange_.add(index, builderForValue.build()); + if (value == null) { + throw new NullPointerException(); + } + kpiValueRange_ = value; onChanged(); } else { - kpiValueRangeBuilder_.addMessage(index, builderForValue.build()); + kpiValueRangeBuilder_.setMessage(value); } + return this; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public Builder addAllKpiValueRange( - java.lang.Iterable<? 
extends monitoring.Monitoring.KpiValueRange> values) { + public Builder setKpiValueRange( + monitoring.Monitoring.KpiValueRange.Builder builderForValue) { if (kpiValueRangeBuilder_ == null) { - ensureKpiValueRangeIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, kpiValueRange_); + kpiValueRange_ = builderForValue.build(); onChanged(); } else { - kpiValueRangeBuilder_.addAllMessages(values); + kpiValueRangeBuilder_.setMessage(builderForValue.build()); } + return this; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public Builder clearKpiValueRange() { + public Builder mergeKpiValueRange(monitoring.Monitoring.KpiValueRange value) { if (kpiValueRangeBuilder_ == null) { - kpiValueRange_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); + if (kpiValueRange_ != null) { + kpiValueRange_ = + monitoring.Monitoring.KpiValueRange.newBuilder(kpiValueRange_).mergeFrom(value).buildPartial(); + } else { + kpiValueRange_ = value; + } onChanged(); } else { - kpiValueRangeBuilder_.clear(); + kpiValueRangeBuilder_.mergeFrom(value); } + return this; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public Builder removeKpiValueRange(int index) { + public Builder clearKpiValueRange() { if (kpiValueRangeBuilder_ == null) { - ensureKpiValueRangeIsMutable(); - kpiValueRange_.remove(index); + kpiValueRange_ = null; onChanged(); } else { - kpiValueRangeBuilder_.remove(index); + kpiValueRange_ = null; + kpiValueRangeBuilder_ = null; } + return this; } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public monitoring.Monitoring.KpiValueRange.Builder getKpiValueRangeBuilder( - int index) { - return getKpiValueRangeFieldBuilder().getBuilder(index); - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public monitoring.Monitoring.KpiValueRangeOrBuilder getKpiValueRangeOrBuilder( - int index) { - if (kpiValueRangeBuilder_ == null) { - return kpiValueRange_.get(index); } else { - return kpiValueRangeBuilder_.getMessageOrBuilder(index); - } + public monitoring.Monitoring.KpiValueRange.Builder getKpiValueRangeBuilder() { + + onChanged(); + return getKpiValueRangeFieldBuilder().getBuilder(); } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public java.util.List<? extends monitoring.Monitoring.KpiValueRangeOrBuilder> - getKpiValueRangeOrBuilderList() { + public monitoring.Monitoring.KpiValueRangeOrBuilder getKpiValueRangeOrBuilder() { if (kpiValueRangeBuilder_ != null) { - return kpiValueRangeBuilder_.getMessageOrBuilderList(); + return kpiValueRangeBuilder_.getMessageOrBuilder(); } else { - return java.util.Collections.unmodifiableList(kpiValueRange_); + return kpiValueRange_ == null ? 
+ monitoring.Monitoring.KpiValueRange.getDefaultInstance() : kpiValueRange_; } } /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public monitoring.Monitoring.KpiValueRange.Builder addKpiValueRangeBuilder() { - return getKpiValueRangeFieldBuilder().addBuilder( - monitoring.Monitoring.KpiValueRange.getDefaultInstance()); - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> + * <code>.monitoring.KpiValueRange kpi_value_range = 5;</code> */ - public monitoring.Monitoring.KpiValueRange.Builder addKpiValueRangeBuilder( - int index) { - return getKpiValueRangeFieldBuilder().addBuilder( - index, monitoring.Monitoring.KpiValueRange.getDefaultInstance()); - } - /** - * <code>repeated .monitoring.KpiValueRange kpi_value_range = 5;</code> - */ - public java.util.List<monitoring.Monitoring.KpiValueRange.Builder> - getKpiValueRangeBuilderList() { - return getKpiValueRangeFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiValueRange, monitoring.Monitoring.KpiValueRange.Builder, monitoring.Monitoring.KpiValueRangeOrBuilder> getKpiValueRangeFieldBuilder() { if (kpiValueRangeBuilder_ == null) { - kpiValueRangeBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + kpiValueRangeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.KpiValueRange, monitoring.Monitoring.KpiValueRange.Builder, monitoring.Monitoring.KpiValueRangeOrBuilder>( - kpiValueRange_, - ((bitField0_ & 0x00000002) != 0), + getKpiValueRange(), getParentForChildren(), isClean()); kpiValueRange_ = null; @@ -16464,19 +18719,19 @@ public final class Monitoring { com.google.protobuf.MessageOrBuilder { /** - * <code>.monitoring.AlarmID alarmID = 1;</code> - * @return Whether the alarmID field is set. + * <code>.monitoring.AlarmID alarm_id = 1;</code> + * @return Whether the alarmId field is set. */ - boolean hasAlarmID(); + boolean hasAlarmId(); /** - * <code>.monitoring.AlarmID alarmID = 1;</code> - * @return The alarmID. + * <code>.monitoring.AlarmID alarm_id = 1;</code> + * @return The alarmId. 
*/ - monitoring.Monitoring.AlarmID getAlarmID(); + monitoring.Monitoring.AlarmID getAlarmId(); /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - monitoring.Monitoring.AlarmIDOrBuilder getAlarmIDOrBuilder(); + monitoring.Monitoring.AlarmIDOrBuilder getAlarmIdOrBuilder(); /** * <code>float subscription_timeout_s = 2;</code> @@ -16537,13 +18792,13 @@ public final class Monitoring { break; case 10: { monitoring.Monitoring.AlarmID.Builder subBuilder = null; - if (alarmID_ != null) { - subBuilder = alarmID_.toBuilder(); + if (alarmId_ != null) { + subBuilder = alarmId_.toBuilder(); } - alarmID_ = input.readMessage(monitoring.Monitoring.AlarmID.parser(), extensionRegistry); + alarmId_ = input.readMessage(monitoring.Monitoring.AlarmID.parser(), extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(alarmID_); - alarmID_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(alarmId_); + alarmId_ = subBuilder.buildPartial(); } break; @@ -16590,30 +18845,30 @@ public final class Monitoring { monitoring.Monitoring.AlarmSubscription.class, monitoring.Monitoring.AlarmSubscription.Builder.class); } - public static final int ALARMID_FIELD_NUMBER = 1; - private monitoring.Monitoring.AlarmID alarmID_; + public static final int ALARM_ID_FIELD_NUMBER = 1; + private monitoring.Monitoring.AlarmID alarmId_; /** - * <code>.monitoring.AlarmID alarmID = 1;</code> - * @return Whether the alarmID field is set. + * <code>.monitoring.AlarmID alarm_id = 1;</code> + * @return Whether the alarmId field is set. */ @java.lang.Override - public boolean hasAlarmID() { - return alarmID_ != null; + public boolean hasAlarmId() { + return alarmId_ != null; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> - * @return The alarmID. + * <code>.monitoring.AlarmID alarm_id = 1;</code> + * @return The alarmId. */ @java.lang.Override - public monitoring.Monitoring.AlarmID getAlarmID() { - return alarmID_ == null ? monitoring.Monitoring.AlarmID.getDefaultInstance() : alarmID_; + public monitoring.Monitoring.AlarmID getAlarmId() { + return alarmId_ == null ? 
monitoring.Monitoring.AlarmID.getDefaultInstance() : alarmId_; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ @java.lang.Override - public monitoring.Monitoring.AlarmIDOrBuilder getAlarmIDOrBuilder() { - return getAlarmID(); + public monitoring.Monitoring.AlarmIDOrBuilder getAlarmIdOrBuilder() { + return getAlarmId(); } public static final int SUBSCRIPTION_TIMEOUT_S_FIELD_NUMBER = 2; @@ -16652,8 +18907,8 @@ public final class Monitoring { @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (alarmID_ != null) { - output.writeMessage(1, getAlarmID()); + if (alarmId_ != null) { + output.writeMessage(1, getAlarmId()); } if (subscriptionTimeoutS_ != 0F) { output.writeFloat(2, subscriptionTimeoutS_); @@ -16670,9 +18925,9 @@ public final class Monitoring { if (size != -1) return size; size = 0; - if (alarmID_ != null) { + if (alarmId_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getAlarmID()); + .computeMessageSize(1, getAlarmId()); } if (subscriptionTimeoutS_ != 0F) { size += com.google.protobuf.CodedOutputStream @@ -16697,10 +18952,10 @@ public final class Monitoring { } monitoring.Monitoring.AlarmSubscription other = (monitoring.Monitoring.AlarmSubscription) obj; - if (hasAlarmID() != other.hasAlarmID()) return false; - if (hasAlarmID()) { - if (!getAlarmID() - .equals(other.getAlarmID())) return false; + if (hasAlarmId() != other.hasAlarmId()) return false; + if (hasAlarmId()) { + if (!getAlarmId() + .equals(other.getAlarmId())) return false; } if (java.lang.Float.floatToIntBits(getSubscriptionTimeoutS()) != java.lang.Float.floatToIntBits( @@ -16719,9 +18974,9 @@ public final class Monitoring { } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); - if (hasAlarmID()) { - hash = (37 * hash) + ALARMID_FIELD_NUMBER; - hash = (53 * hash) + getAlarmID().hashCode(); + if (hasAlarmId()) { + hash = (37 * hash) + ALARM_ID_FIELD_NUMBER; + hash = (53 * hash) + getAlarmId().hashCode(); } hash = (37 * hash) + SUBSCRIPTION_TIMEOUT_S_FIELD_NUMBER; hash = (53 * hash) + java.lang.Float.floatToIntBits( @@ -16862,11 +19117,11 @@ public final class Monitoring { @java.lang.Override public Builder clear() { super.clear(); - if (alarmIDBuilder_ == null) { - alarmID_ = null; + if (alarmIdBuilder_ == null) { + alarmId_ = null; } else { - alarmID_ = null; - alarmIDBuilder_ = null; + alarmId_ = null; + alarmIdBuilder_ = null; } subscriptionTimeoutS_ = 0F; @@ -16898,10 +19153,10 @@ public final class Monitoring { @java.lang.Override public monitoring.Monitoring.AlarmSubscription buildPartial() { monitoring.Monitoring.AlarmSubscription result = new monitoring.Monitoring.AlarmSubscription(this); - if (alarmIDBuilder_ == null) { - result.alarmID_ = alarmID_; + if (alarmIdBuilder_ == null) { + result.alarmId_ = alarmId_; } else { - result.alarmID_ = alarmIDBuilder_.build(); + result.alarmId_ = alarmIdBuilder_.build(); } result.subscriptionTimeoutS_ = subscriptionTimeoutS_; result.subscriptionFrequencyMs_ = subscriptionFrequencyMs_; @@ -16953,8 +19208,8 @@ public final class Monitoring { public Builder mergeFrom(monitoring.Monitoring.AlarmSubscription other) { if (other == monitoring.Monitoring.AlarmSubscription.getDefaultInstance()) return this; - if (other.hasAlarmID()) { - mergeAlarmID(other.getAlarmID()); + if (other.hasAlarmId()) { + mergeAlarmId(other.getAlarmId()); } if (other.getSubscriptionTimeoutS() != 0F) { 
setSubscriptionTimeoutS(other.getSubscriptionTimeoutS()); @@ -16991,123 +19246,123 @@ public final class Monitoring { return this; } - private monitoring.Monitoring.AlarmID alarmID_; + private monitoring.Monitoring.AlarmID alarmId_; private com.google.protobuf.SingleFieldBuilderV3< - monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder> alarmIDBuilder_; + monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder> alarmIdBuilder_; /** - * <code>.monitoring.AlarmID alarmID = 1;</code> - * @return Whether the alarmID field is set. + * <code>.monitoring.AlarmID alarm_id = 1;</code> + * @return Whether the alarmId field is set. */ - public boolean hasAlarmID() { - return alarmIDBuilder_ != null || alarmID_ != null; + public boolean hasAlarmId() { + return alarmIdBuilder_ != null || alarmId_ != null; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> - * @return The alarmID. + * <code>.monitoring.AlarmID alarm_id = 1;</code> + * @return The alarmId. */ - public monitoring.Monitoring.AlarmID getAlarmID() { - if (alarmIDBuilder_ == null) { - return alarmID_ == null ? monitoring.Monitoring.AlarmID.getDefaultInstance() : alarmID_; + public monitoring.Monitoring.AlarmID getAlarmId() { + if (alarmIdBuilder_ == null) { + return alarmId_ == null ? monitoring.Monitoring.AlarmID.getDefaultInstance() : alarmId_; } else { - return alarmIDBuilder_.getMessage(); + return alarmIdBuilder_.getMessage(); } } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - public Builder setAlarmID(monitoring.Monitoring.AlarmID value) { - if (alarmIDBuilder_ == null) { + public Builder setAlarmId(monitoring.Monitoring.AlarmID value) { + if (alarmIdBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - alarmID_ = value; + alarmId_ = value; onChanged(); } else { - alarmIDBuilder_.setMessage(value); + alarmIdBuilder_.setMessage(value); } return this; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - public Builder setAlarmID( + public Builder setAlarmId( monitoring.Monitoring.AlarmID.Builder builderForValue) { - if (alarmIDBuilder_ == null) { - alarmID_ = builderForValue.build(); + if (alarmIdBuilder_ == null) { + alarmId_ = builderForValue.build(); onChanged(); } else { - alarmIDBuilder_.setMessage(builderForValue.build()); + alarmIdBuilder_.setMessage(builderForValue.build()); } return this; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - public Builder mergeAlarmID(monitoring.Monitoring.AlarmID value) { - if (alarmIDBuilder_ == null) { - if (alarmID_ != null) { - alarmID_ = - monitoring.Monitoring.AlarmID.newBuilder(alarmID_).mergeFrom(value).buildPartial(); + public Builder mergeAlarmId(monitoring.Monitoring.AlarmID value) { + if (alarmIdBuilder_ == null) { + if (alarmId_ != null) { + alarmId_ = + monitoring.Monitoring.AlarmID.newBuilder(alarmId_).mergeFrom(value).buildPartial(); } else { - alarmID_ = value; + alarmId_ = value; } onChanged(); } else { - alarmIDBuilder_.mergeFrom(value); + alarmIdBuilder_.mergeFrom(value); } return this; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - public Builder clearAlarmID() { - if (alarmIDBuilder_ == null) { - alarmID_ = null; + public Builder clearAlarmId() { + if (alarmIdBuilder_ == null) { + alarmId_ = 
null; onChanged(); } else { - alarmID_ = null; - alarmIDBuilder_ = null; + alarmId_ = null; + alarmIdBuilder_ = null; } return this; } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - public monitoring.Monitoring.AlarmID.Builder getAlarmIDBuilder() { + public monitoring.Monitoring.AlarmID.Builder getAlarmIdBuilder() { onChanged(); - return getAlarmIDFieldBuilder().getBuilder(); + return getAlarmIdFieldBuilder().getBuilder(); } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ - public monitoring.Monitoring.AlarmIDOrBuilder getAlarmIDOrBuilder() { - if (alarmIDBuilder_ != null) { - return alarmIDBuilder_.getMessageOrBuilder(); + public monitoring.Monitoring.AlarmIDOrBuilder getAlarmIdOrBuilder() { + if (alarmIdBuilder_ != null) { + return alarmIdBuilder_.getMessageOrBuilder(); } else { - return alarmID_ == null ? - monitoring.Monitoring.AlarmID.getDefaultInstance() : alarmID_; + return alarmId_ == null ? + monitoring.Monitoring.AlarmID.getDefaultInstance() : alarmId_; } } /** - * <code>.monitoring.AlarmID alarmID = 1;</code> + * <code>.monitoring.AlarmID alarm_id = 1;</code> */ private com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder> - getAlarmIDFieldBuilder() { - if (alarmIDBuilder_ == null) { - alarmIDBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + getAlarmIdFieldBuilder() { + if (alarmIdBuilder_ == null) { + alarmIdBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder>( - getAlarmID(), + getAlarmId(), getParentForChildren(), isClean()); - alarmID_ = null; + alarmId_ = null; } - return alarmIDBuilder_; + return alarmIdBuilder_; } private float subscriptionTimeoutS_ ; @@ -17256,34 +19511,19 @@ public final class Monitoring { getTextBytes(); /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - * @return Whether the kpiValue field is set. - */ - boolean hasKpiValue(); - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - * @return The kpiValue. - */ - monitoring.Monitoring.KpiValue getKpiValue(); - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - monitoring.Monitoring.KpiValueOrBuilder getKpiValueOrBuilder(); - - /** - * <code>.context.Timestamp timestamp = 4;</code> - * @return Whether the timestamp field is set. + * <code>.monitoring.KpiList kpi_list = 3;</code> + * @return Whether the kpiList field is set. */ - boolean hasTimestamp(); + boolean hasKpiList(); /** - * <code>.context.Timestamp timestamp = 4;</code> - * @return The timestamp. + * <code>.monitoring.KpiList kpi_list = 3;</code> + * @return The kpiList. 
*/ - context.ContextOuterClass.Timestamp getTimestamp(); + monitoring.Monitoring.KpiList getKpiList(); /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - context.ContextOuterClass.TimestampOrBuilder getTimestampOrBuilder(); + monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder(); } /** * Protobuf type {@code monitoring.AlarmResponse} @@ -17351,27 +19591,14 @@ public final class Monitoring { break; } case 26: { - monitoring.Monitoring.KpiValue.Builder subBuilder = null; - if (kpiValue_ != null) { - subBuilder = kpiValue_.toBuilder(); - } - kpiValue_ = input.readMessage(monitoring.Monitoring.KpiValue.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(kpiValue_); - kpiValue_ = subBuilder.buildPartial(); - } - - break; - } - case 34: { - context.ContextOuterClass.Timestamp.Builder subBuilder = null; - if (timestamp_ != null) { - subBuilder = timestamp_.toBuilder(); + monitoring.Monitoring.KpiList.Builder subBuilder = null; + if (kpiList_ != null) { + subBuilder = kpiList_.toBuilder(); } - timestamp_ = input.readMessage(context.ContextOuterClass.Timestamp.parser(), extensionRegistry); + kpiList_ = input.readMessage(monitoring.Monitoring.KpiList.parser(), extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(timestamp_); - timestamp_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(kpiList_); + kpiList_ = subBuilder.buildPartial(); } break; @@ -17472,56 +19699,30 @@ public final class Monitoring { } } - public static final int KPI_VALUE_FIELD_NUMBER = 3; - private monitoring.Monitoring.KpiValue kpiValue_; - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - * @return Whether the kpiValue field is set. - */ - @java.lang.Override - public boolean hasKpiValue() { - return kpiValue_ != null; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - * @return The kpiValue. - */ - @java.lang.Override - public monitoring.Monitoring.KpiValue getKpiValue() { - return kpiValue_ == null ? monitoring.Monitoring.KpiValue.getDefaultInstance() : kpiValue_; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - @java.lang.Override - public monitoring.Monitoring.KpiValueOrBuilder getKpiValueOrBuilder() { - return getKpiValue(); - } - - public static final int TIMESTAMP_FIELD_NUMBER = 4; - private context.ContextOuterClass.Timestamp timestamp_; + public static final int KPI_LIST_FIELD_NUMBER = 3; + private monitoring.Monitoring.KpiList kpiList_; /** - * <code>.context.Timestamp timestamp = 4;</code> - * @return Whether the timestamp field is set. + * <code>.monitoring.KpiList kpi_list = 3;</code> + * @return Whether the kpiList field is set. */ @java.lang.Override - public boolean hasTimestamp() { - return timestamp_ != null; + public boolean hasKpiList() { + return kpiList_ != null; } /** - * <code>.context.Timestamp timestamp = 4;</code> - * @return The timestamp. + * <code>.monitoring.KpiList kpi_list = 3;</code> + * @return The kpiList. */ @java.lang.Override - public context.ContextOuterClass.Timestamp getTimestamp() { - return timestamp_ == null ? context.ContextOuterClass.Timestamp.getDefaultInstance() : timestamp_; + public monitoring.Monitoring.KpiList getKpiList() { + return kpiList_ == null ? 
monitoring.Monitoring.KpiList.getDefaultInstance() : kpiList_; } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ @java.lang.Override - public context.ContextOuterClass.TimestampOrBuilder getTimestampOrBuilder() { - return getTimestamp(); + public monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder() { + return getKpiList(); } private byte memoizedIsInitialized = -1; @@ -17544,11 +19745,8 @@ public final class Monitoring { if (!getTextBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, text_); } - if (kpiValue_ != null) { - output.writeMessage(3, getKpiValue()); - } - if (timestamp_ != null) { - output.writeMessage(4, getTimestamp()); + if (kpiList_ != null) { + output.writeMessage(3, getKpiList()); } unknownFields.writeTo(output); } @@ -17566,13 +19764,9 @@ public final class Monitoring { if (!getTextBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, text_); } - if (kpiValue_ != null) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, getKpiValue()); - } - if (timestamp_ != null) { + if (kpiList_ != null) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, getTimestamp()); + .computeMessageSize(3, getKpiList()); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -17596,15 +19790,10 @@ public final class Monitoring { } if (!getText() .equals(other.getText())) return false; - if (hasKpiValue() != other.hasKpiValue()) return false; - if (hasKpiValue()) { - if (!getKpiValue() - .equals(other.getKpiValue())) return false; - } - if (hasTimestamp() != other.hasTimestamp()) return false; - if (hasTimestamp()) { - if (!getTimestamp() - .equals(other.getTimestamp())) return false; + if (hasKpiList() != other.hasKpiList()) return false; + if (hasKpiList()) { + if (!getKpiList() + .equals(other.getKpiList())) return false; } if (!unknownFields.equals(other.unknownFields)) return false; return true; @@ -17623,13 +19812,9 @@ public final class Monitoring { } hash = (37 * hash) + TEXT_FIELD_NUMBER; hash = (53 * hash) + getText().hashCode(); - if (hasKpiValue()) { - hash = (37 * hash) + KPI_VALUE_FIELD_NUMBER; - hash = (53 * hash) + getKpiValue().hashCode(); - } - if (hasTimestamp()) { - hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + getTimestamp().hashCode(); + if (hasKpiList()) { + hash = (37 * hash) + KPI_LIST_FIELD_NUMBER; + hash = (53 * hash) + getKpiList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; @@ -17766,23 +19951,17 @@ public final class Monitoring { super.clear(); if (alarmIdBuilder_ == null) { alarmId_ = null; - } else { - alarmId_ = null; - alarmIdBuilder_ = null; - } - text_ = ""; - - if (kpiValueBuilder_ == null) { - kpiValue_ = null; - } else { - kpiValue_ = null; - kpiValueBuilder_ = null; + } else { + alarmId_ = null; + alarmIdBuilder_ = null; } - if (timestampBuilder_ == null) { - timestamp_ = null; + text_ = ""; + + if (kpiListBuilder_ == null) { + kpiList_ = null; } else { - timestamp_ = null; - timestampBuilder_ = null; + kpiList_ = null; + kpiListBuilder_ = null; } return this; } @@ -17816,15 +19995,10 @@ public final class Monitoring { result.alarmId_ = alarmIdBuilder_.build(); } result.text_ = text_; - if (kpiValueBuilder_ == null) { - result.kpiValue_ = kpiValue_; - } else { - result.kpiValue_ = kpiValueBuilder_.build(); - } - if (timestampBuilder_ == null) { - result.timestamp_ = timestamp_; + if (kpiListBuilder_ == 
null) { + result.kpiList_ = kpiList_; } else { - result.timestamp_ = timestampBuilder_.build(); + result.kpiList_ = kpiListBuilder_.build(); } onBuilt(); return result; @@ -17881,11 +20055,8 @@ public final class Monitoring { text_ = other.text_; onChanged(); } - if (other.hasKpiValue()) { - mergeKpiValue(other.getKpiValue()); - } - if (other.hasTimestamp()) { - mergeTimestamp(other.getTimestamp()); + if (other.hasKpiList()) { + mergeKpiList(other.getKpiList()); } this.mergeUnknownFields(other.unknownFields); onChanged(); @@ -18111,242 +20282,123 @@ public final class Monitoring { return this; } - private monitoring.Monitoring.KpiValue kpiValue_; - private com.google.protobuf.SingleFieldBuilderV3< - monitoring.Monitoring.KpiValue, monitoring.Monitoring.KpiValue.Builder, monitoring.Monitoring.KpiValueOrBuilder> kpiValueBuilder_; - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - * @return Whether the kpiValue field is set. - */ - public boolean hasKpiValue() { - return kpiValueBuilder_ != null || kpiValue_ != null; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - * @return The kpiValue. - */ - public monitoring.Monitoring.KpiValue getKpiValue() { - if (kpiValueBuilder_ == null) { - return kpiValue_ == null ? monitoring.Monitoring.KpiValue.getDefaultInstance() : kpiValue_; - } else { - return kpiValueBuilder_.getMessage(); - } - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - public Builder setKpiValue(monitoring.Monitoring.KpiValue value) { - if (kpiValueBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - kpiValue_ = value; - onChanged(); - } else { - kpiValueBuilder_.setMessage(value); - } - - return this; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - public Builder setKpiValue( - monitoring.Monitoring.KpiValue.Builder builderForValue) { - if (kpiValueBuilder_ == null) { - kpiValue_ = builderForValue.build(); - onChanged(); - } else { - kpiValueBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - public Builder mergeKpiValue(monitoring.Monitoring.KpiValue value) { - if (kpiValueBuilder_ == null) { - if (kpiValue_ != null) { - kpiValue_ = - monitoring.Monitoring.KpiValue.newBuilder(kpiValue_).mergeFrom(value).buildPartial(); - } else { - kpiValue_ = value; - } - onChanged(); - } else { - kpiValueBuilder_.mergeFrom(value); - } - - return this; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - public Builder clearKpiValue() { - if (kpiValueBuilder_ == null) { - kpiValue_ = null; - onChanged(); - } else { - kpiValue_ = null; - kpiValueBuilder_ = null; - } - - return this; - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - public monitoring.Monitoring.KpiValue.Builder getKpiValueBuilder() { - - onChanged(); - return getKpiValueFieldBuilder().getBuilder(); - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - public monitoring.Monitoring.KpiValueOrBuilder getKpiValueOrBuilder() { - if (kpiValueBuilder_ != null) { - return kpiValueBuilder_.getMessageOrBuilder(); - } else { - return kpiValue_ == null ? 
- monitoring.Monitoring.KpiValue.getDefaultInstance() : kpiValue_; - } - } - /** - * <code>.monitoring.KpiValue kpi_value = 3;</code> - */ - private com.google.protobuf.SingleFieldBuilderV3< - monitoring.Monitoring.KpiValue, monitoring.Monitoring.KpiValue.Builder, monitoring.Monitoring.KpiValueOrBuilder> - getKpiValueFieldBuilder() { - if (kpiValueBuilder_ == null) { - kpiValueBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - monitoring.Monitoring.KpiValue, monitoring.Monitoring.KpiValue.Builder, monitoring.Monitoring.KpiValueOrBuilder>( - getKpiValue(), - getParentForChildren(), - isClean()); - kpiValue_ = null; - } - return kpiValueBuilder_; - } - - private context.ContextOuterClass.Timestamp timestamp_; + private monitoring.Monitoring.KpiList kpiList_; private com.google.protobuf.SingleFieldBuilderV3< - context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder> timestampBuilder_; + monitoring.Monitoring.KpiList, monitoring.Monitoring.KpiList.Builder, monitoring.Monitoring.KpiListOrBuilder> kpiListBuilder_; /** - * <code>.context.Timestamp timestamp = 4;</code> - * @return Whether the timestamp field is set. + * <code>.monitoring.KpiList kpi_list = 3;</code> + * @return Whether the kpiList field is set. */ - public boolean hasTimestamp() { - return timestampBuilder_ != null || timestamp_ != null; + public boolean hasKpiList() { + return kpiListBuilder_ != null || kpiList_ != null; } /** - * <code>.context.Timestamp timestamp = 4;</code> - * @return The timestamp. + * <code>.monitoring.KpiList kpi_list = 3;</code> + * @return The kpiList. */ - public context.ContextOuterClass.Timestamp getTimestamp() { - if (timestampBuilder_ == null) { - return timestamp_ == null ? context.ContextOuterClass.Timestamp.getDefaultInstance() : timestamp_; + public monitoring.Monitoring.KpiList getKpiList() { + if (kpiListBuilder_ == null) { + return kpiList_ == null ? 
monitoring.Monitoring.KpiList.getDefaultInstance() : kpiList_; } else { - return timestampBuilder_.getMessage(); + return kpiListBuilder_.getMessage(); } } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - public Builder setTimestamp(context.ContextOuterClass.Timestamp value) { - if (timestampBuilder_ == null) { + public Builder setKpiList(monitoring.Monitoring.KpiList value) { + if (kpiListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - timestamp_ = value; + kpiList_ = value; onChanged(); } else { - timestampBuilder_.setMessage(value); + kpiListBuilder_.setMessage(value); } return this; } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - public Builder setTimestamp( - context.ContextOuterClass.Timestamp.Builder builderForValue) { - if (timestampBuilder_ == null) { - timestamp_ = builderForValue.build(); + public Builder setKpiList( + monitoring.Monitoring.KpiList.Builder builderForValue) { + if (kpiListBuilder_ == null) { + kpiList_ = builderForValue.build(); onChanged(); } else { - timestampBuilder_.setMessage(builderForValue.build()); + kpiListBuilder_.setMessage(builderForValue.build()); } return this; } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - public Builder mergeTimestamp(context.ContextOuterClass.Timestamp value) { - if (timestampBuilder_ == null) { - if (timestamp_ != null) { - timestamp_ = - context.ContextOuterClass.Timestamp.newBuilder(timestamp_).mergeFrom(value).buildPartial(); + public Builder mergeKpiList(monitoring.Monitoring.KpiList value) { + if (kpiListBuilder_ == null) { + if (kpiList_ != null) { + kpiList_ = + monitoring.Monitoring.KpiList.newBuilder(kpiList_).mergeFrom(value).buildPartial(); } else { - timestamp_ = value; + kpiList_ = value; } onChanged(); } else { - timestampBuilder_.mergeFrom(value); + kpiListBuilder_.mergeFrom(value); } return this; } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - public Builder clearTimestamp() { - if (timestampBuilder_ == null) { - timestamp_ = null; + public Builder clearKpiList() { + if (kpiListBuilder_ == null) { + kpiList_ = null; onChanged(); } else { - timestamp_ = null; - timestampBuilder_ = null; + kpiList_ = null; + kpiListBuilder_ = null; } return this; } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - public context.ContextOuterClass.Timestamp.Builder getTimestampBuilder() { + public monitoring.Monitoring.KpiList.Builder getKpiListBuilder() { onChanged(); - return getTimestampFieldBuilder().getBuilder(); + return getKpiListFieldBuilder().getBuilder(); } /** - * <code>.context.Timestamp timestamp = 4;</code> + * <code>.monitoring.KpiList kpi_list = 3;</code> */ - public context.ContextOuterClass.TimestampOrBuilder getTimestampOrBuilder() { - if (timestampBuilder_ != null) { - return timestampBuilder_.getMessageOrBuilder(); + public monitoring.Monitoring.KpiListOrBuilder getKpiListOrBuilder() { + if (kpiListBuilder_ != null) { + return kpiListBuilder_.getMessageOrBuilder(); } else { - return timestamp_ == null ? - context.ContextOuterClass.Timestamp.getDefaultInstance() : timestamp_; + return kpiList_ == null ? 
+              monitoring.Monitoring.KpiList.getDefaultInstance() : kpiList_;
         }
       }
       /**
-       * <code>.context.Timestamp timestamp = 4;</code>
+       * <code>.monitoring.KpiList kpi_list = 3;</code>
        */
       private com.google.protobuf.SingleFieldBuilderV3<
-          context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder>
-          getTimestampFieldBuilder() {
-        if (timestampBuilder_ == null) {
-          timestampBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
-              context.ContextOuterClass.Timestamp, context.ContextOuterClass.Timestamp.Builder, context.ContextOuterClass.TimestampOrBuilder>(
-                  getTimestamp(),
+          monitoring.Monitoring.KpiList, monitoring.Monitoring.KpiList.Builder, monitoring.Monitoring.KpiListOrBuilder>
+          getKpiListFieldBuilder() {
+        if (kpiListBuilder_ == null) {
+          kpiListBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+              monitoring.Monitoring.KpiList, monitoring.Monitoring.KpiList.Builder, monitoring.Monitoring.KpiListOrBuilder>(
+                  getKpiList(),
                   getParentForChildren(),
                   isClean());
-          timestamp_ = null;
+          kpiList_ = null;
         }
-        return timestampBuilder_;
+        return kpiListBuilder_;
       }
       @java.lang.Override
       public final Builder setUnknownFields(
@@ -18401,55 +20453,55 @@ public final class Monitoring {
   }

-  public interface AlarmIDListOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:monitoring.AlarmIDList)
+  public interface AlarmListOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:monitoring.AlarmList)
       com.google.protobuf.MessageOrBuilder {
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
-    java.util.List<monitoring.Monitoring.AlarmID>
-        getAlarmListList();
+    java.util.List<monitoring.Monitoring.AlarmDescriptor>
+        getAlarmDescriptorList();
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
-    monitoring.Monitoring.AlarmID getAlarmList(int index);
+    monitoring.Monitoring.AlarmDescriptor getAlarmDescriptor(int index);
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
-    int getAlarmListCount();
+    int getAlarmDescriptorCount();
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
-    java.util.List<? extends monitoring.Monitoring.AlarmIDOrBuilder>
-        getAlarmListOrBuilderList();
+    java.util.List<? extends monitoring.Monitoring.AlarmDescriptorOrBuilder>
+        getAlarmDescriptorOrBuilderList();
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
-    monitoring.Monitoring.AlarmIDOrBuilder getAlarmListOrBuilder(
+    monitoring.Monitoring.AlarmDescriptorOrBuilder getAlarmDescriptorOrBuilder(
         int index);
   }
   /**
-   * Protobuf type {@code monitoring.AlarmIDList}
+   * Protobuf type {@code monitoring.AlarmList}
    */
-  public static final class AlarmIDList extends
+  public static final class AlarmList extends
       com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:monitoring.AlarmIDList)
-      AlarmIDListOrBuilder {
+      // @@protoc_insertion_point(message_implements:monitoring.AlarmList)
+      AlarmListOrBuilder {
   private static final long serialVersionUID = 0L;
-    // Use AlarmIDList.newBuilder() to construct.
-    private AlarmIDList(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    // Use AlarmList.newBuilder() to construct.
+    private AlarmList(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
       super(builder);
     }
-    private AlarmIDList() {
-      alarmList_ = java.util.Collections.emptyList();
+    private AlarmList() {
+      alarmDescriptor_ = java.util.Collections.emptyList();
     }
     @java.lang.Override
     @SuppressWarnings({"unused"})
     protected java.lang.Object newInstance(
         UnusedPrivateParameter unused) {
-      return new AlarmIDList();
+      return new AlarmList();
     }
     @java.lang.Override
@@ -18457,7 +20509,7 @@ public final class Monitoring {
     getUnknownFields() {
       return this.unknownFields;
     }
-    private AlarmIDList(
+    private AlarmList(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -18478,11 +20530,11 @@ public final class Monitoring {
             break;
           case 10: {
             if (!((mutable_bitField0_ & 0x00000001) != 0)) {
-              alarmList_ = new java.util.ArrayList<monitoring.Monitoring.AlarmID>();
+              alarmDescriptor_ = new java.util.ArrayList<monitoring.Monitoring.AlarmDescriptor>();
               mutable_bitField0_ |= 0x00000001;
             }
-            alarmList_.add(
-                input.readMessage(monitoring.Monitoring.AlarmID.parser(), extensionRegistry));
+            alarmDescriptor_.add(
+                input.readMessage(monitoring.Monitoring.AlarmDescriptor.parser(), extensionRegistry));
             break;
           }
           default: {
@@ -18501,7 +20553,7 @@ public final class Monitoring {
           e).setUnfinishedMessage(this);
     } finally {
       if (((mutable_bitField0_ & 0x00000001) != 0)) {
-        alarmList_ = java.util.Collections.unmodifiableList(alarmList_);
+        alarmDescriptor_ = java.util.Collections.unmodifiableList(alarmDescriptor_);
       }
       this.unknownFields = unknownFields.build();
       makeExtensionsImmutable();
@@ -18509,55 +20561,55 @@ public final class Monitoring {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return monitoring.Monitoring.internal_static_monitoring_AlarmIDList_descriptor;
+      return monitoring.Monitoring.internal_static_monitoring_AlarmList_descriptor;
     }
     @java.lang.Override
     protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return monitoring.Monitoring.internal_static_monitoring_AlarmIDList_fieldAccessorTable
+      return monitoring.Monitoring.internal_static_monitoring_AlarmList_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              monitoring.Monitoring.AlarmIDList.class, monitoring.Monitoring.AlarmIDList.Builder.class);
+              monitoring.Monitoring.AlarmList.class, monitoring.Monitoring.AlarmList.Builder.class);
     }
-    public static final int ALARM_LIST_FIELD_NUMBER = 1;
-    private java.util.List<monitoring.Monitoring.AlarmID> alarmList_;
+    public static final int ALARM_DESCRIPTOR_FIELD_NUMBER = 1;
+    private java.util.List<monitoring.Monitoring.AlarmDescriptor> alarmDescriptor_;
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
     @java.lang.Override
-    public java.util.List<monitoring.Monitoring.AlarmID> getAlarmListList() {
-      return alarmList_;
+    public java.util.List<monitoring.Monitoring.AlarmDescriptor> getAlarmDescriptorList() {
+      return alarmDescriptor_;
     }
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
     @java.lang.Override
-    public java.util.List<? extends monitoring.Monitoring.AlarmIDOrBuilder>
-        getAlarmListOrBuilderList() {
-      return alarmList_;
+    public java.util.List<? extends monitoring.Monitoring.AlarmDescriptorOrBuilder>
+        getAlarmDescriptorOrBuilderList() {
+      return alarmDescriptor_;
     }
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
     @java.lang.Override
-    public int getAlarmListCount() {
-      return alarmList_.size();
+    public int getAlarmDescriptorCount() {
+      return alarmDescriptor_.size();
     }
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
     @java.lang.Override
-    public monitoring.Monitoring.AlarmID getAlarmList(int index) {
-      return alarmList_.get(index);
+    public monitoring.Monitoring.AlarmDescriptor getAlarmDescriptor(int index) {
+      return alarmDescriptor_.get(index);
     }
     /**
-     * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+     * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
      */
     @java.lang.Override
-    public monitoring.Monitoring.AlarmIDOrBuilder getAlarmListOrBuilder(
+    public monitoring.Monitoring.AlarmDescriptorOrBuilder getAlarmDescriptorOrBuilder(
         int index) {
-      return alarmList_.get(index);
+      return alarmDescriptor_.get(index);
     }
     private byte memoizedIsInitialized = -1;
@@ -18574,8 +20626,8 @@ public final class Monitoring {
     @java.lang.Override
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
-      for (int i = 0; i < alarmList_.size(); i++) {
-        output.writeMessage(1, alarmList_.get(i));
+      for (int i = 0; i < alarmDescriptor_.size(); i++) {
+        output.writeMessage(1, alarmDescriptor_.get(i));
       }
       unknownFields.writeTo(output);
     }
@@ -18586,9 +20638,9 @@ public final class Monitoring {
       if (size != -1) return size;
       size = 0;
-      for (int i = 0; i < alarmList_.size(); i++) {
+      for (int i = 0; i < alarmDescriptor_.size(); i++) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, alarmList_.get(i));
+          .computeMessageSize(1, alarmDescriptor_.get(i));
       }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
@@ -18600,13 +20652,13 @@ public final class Monitoring {
       if (obj == this) {
        return true;
      }
-      if (!(obj instanceof monitoring.Monitoring.AlarmIDList)) {
+      if (!(obj instanceof monitoring.Monitoring.AlarmList)) {
         return super.equals(obj);
       }
-      monitoring.Monitoring.AlarmIDList other = (monitoring.Monitoring.AlarmIDList) obj;
+      monitoring.Monitoring.AlarmList other = (monitoring.Monitoring.AlarmList) obj;
-      if (!getAlarmListList()
-          .equals(other.getAlarmListList())) return false;
+      if (!getAlarmDescriptorList()
+          .equals(other.getAlarmDescriptorList())) return false;
       if (!unknownFields.equals(other.unknownFields)) return false;
       return true;
     }
@@ -18618,78 +20670,78 @@ public final class Monitoring {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptor().hashCode();
-      if (getAlarmListCount() > 0) {
-        hash = (37 * hash) + ALARM_LIST_FIELD_NUMBER;
-        hash = (53 * hash) + getAlarmListList().hashCode();
+      if (getAlarmDescriptorCount() > 0) {
+        hash = (37 * hash) + ALARM_DESCRIPTOR_FIELD_NUMBER;
+        hash = (53 * hash) + getAlarmDescriptorList().hashCode();
       }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
     }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
         java.nio.ByteBuffer data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
         java.nio.ByteBuffer data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(byte[] data)
+    public static monitoring.Monitoring.AlarmList parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(java.io.InputStream input)
+    public static monitoring.Monitoring.AlarmList parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
-    public static monitoring.Monitoring.AlarmIDList parseDelimitedFrom(java.io.InputStream input)
+    public static monitoring.Monitoring.AlarmList parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
-    public static monitoring.Monitoring.AlarmIDList parseDelimitedFrom(
+    public static monitoring.Monitoring.AlarmList parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
-    public static monitoring.Monitoring.AlarmIDList parseFrom(
+    public static monitoring.Monitoring.AlarmList parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
@@ -18702,7 +20754,7 @@ public final class Monitoring {
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
-    public static Builder newBuilder(monitoring.Monitoring.AlarmIDList prototype) {
+    public static Builder newBuilder(monitoring.Monitoring.AlarmList prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
@@ -18718,26 +20770,26 @@ public final class Monitoring {
      return builder;
    }
    /**
-     * Protobuf type {@code monitoring.AlarmIDList}
+     * Protobuf type {@code monitoring.AlarmList}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:monitoring.AlarmIDList)
-        monitoring.Monitoring.AlarmIDListOrBuilder {
+        // @@protoc_insertion_point(builder_implements:monitoring.AlarmList)
+        monitoring.Monitoring.AlarmListOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
-        return monitoring.Monitoring.internal_static_monitoring_AlarmIDList_descriptor;
+        return monitoring.Monitoring.internal_static_monitoring_AlarmList_descriptor;
      }
      @java.lang.Override
      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
-        return monitoring.Monitoring.internal_static_monitoring_AlarmIDList_fieldAccessorTable
+        return monitoring.Monitoring.internal_static_monitoring_AlarmList_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
-                monitoring.Monitoring.AlarmIDList.class, monitoring.Monitoring.AlarmIDList.Builder.class);
+                monitoring.Monitoring.AlarmList.class, monitoring.Monitoring.AlarmList.Builder.class);
      }
-      // Construct using monitoring.Monitoring.AlarmIDList.newBuilder()
+      // Construct using monitoring.Monitoring.AlarmList.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
@@ -18750,17 +20802,17 @@ public final class Monitoring {
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
-          getAlarmListFieldBuilder();
+          getAlarmDescriptorFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
-        if (alarmListBuilder_ == null) {
-          alarmList_ = java.util.Collections.emptyList();
+        if (alarmDescriptorBuilder_ == null) {
+          alarmDescriptor_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
        } else {
-          alarmListBuilder_.clear();
+          alarmDescriptorBuilder_.clear();
        }
        return this;
      }
@@ -18768,17 +20820,17 @@ public final class Monitoring {
      @java.lang.Override
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
-        return monitoring.Monitoring.internal_static_monitoring_AlarmIDList_descriptor;
+        return monitoring.Monitoring.internal_static_monitoring_AlarmList_descriptor;
      }
      @java.lang.Override
-      public monitoring.Monitoring.AlarmIDList getDefaultInstanceForType() {
-        return monitoring.Monitoring.AlarmIDList.getDefaultInstance();
+      public monitoring.Monitoring.AlarmList getDefaultInstanceForType() {
+        return monitoring.Monitoring.AlarmList.getDefaultInstance();
      }
      @java.lang.Override
-      public monitoring.Monitoring.AlarmIDList build() {
-        monitoring.Monitoring.AlarmIDList result = buildPartial();
+      public monitoring.Monitoring.AlarmList build() {
+        monitoring.Monitoring.AlarmList result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
@@ -18786,17 +20838,17 @@ public final class Monitoring {
      }
      @java.lang.Override
-      public monitoring.Monitoring.AlarmIDList buildPartial() {
-        monitoring.Monitoring.AlarmIDList result = new monitoring.Monitoring.AlarmIDList(this);
+      public monitoring.Monitoring.AlarmList buildPartial() {
+        monitoring.Monitoring.AlarmList result = new monitoring.Monitoring.AlarmList(this);
        int from_bitField0_ = bitField0_;
-        if (alarmListBuilder_ == null) {
+        if (alarmDescriptorBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
-            alarmList_ = java.util.Collections.unmodifiableList(alarmList_);
+            alarmDescriptor_ = java.util.Collections.unmodifiableList(alarmDescriptor_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
-          result.alarmList_ = alarmList_;
+          result.alarmDescriptor_ = alarmDescriptor_;
        } else {
-          result.alarmList_ = alarmListBuilder_.build();
+          result.alarmDescriptor_ = alarmDescriptorBuilder_.build();
        }
        onBuilt();
        return result;
@@ -18836,39 +20888,39 @@ public final class Monitoring {
      }
      @java.lang.Override
      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof monitoring.Monitoring.AlarmIDList) {
-          return mergeFrom((monitoring.Monitoring.AlarmIDList)other);
+        if (other instanceof monitoring.Monitoring.AlarmList) {
+          return mergeFrom((monitoring.Monitoring.AlarmList)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
-      public Builder mergeFrom(monitoring.Monitoring.AlarmIDList other) {
-        if (other == monitoring.Monitoring.AlarmIDList.getDefaultInstance()) return this;
-        if (alarmListBuilder_ == null) {
-          if (!other.alarmList_.isEmpty()) {
-            if (alarmList_.isEmpty()) {
-              alarmList_ = other.alarmList_;
+      public Builder mergeFrom(monitoring.Monitoring.AlarmList other) {
+        if (other == monitoring.Monitoring.AlarmList.getDefaultInstance()) return this;
+        if (alarmDescriptorBuilder_ == null) {
+          if (!other.alarmDescriptor_.isEmpty()) {
+            if (alarmDescriptor_.isEmpty()) {
+              alarmDescriptor_ = other.alarmDescriptor_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
-              ensureAlarmListIsMutable();
-              alarmList_.addAll(other.alarmList_);
+              ensureAlarmDescriptorIsMutable();
+              alarmDescriptor_.addAll(other.alarmDescriptor_);
            }
            onChanged();
          }
        } else {
-          if (!other.alarmList_.isEmpty()) {
-            if (alarmListBuilder_.isEmpty()) {
-              alarmListBuilder_.dispose();
-              alarmListBuilder_ = null;
-              alarmList_ = other.alarmList_;
+          if (!other.alarmDescriptor_.isEmpty()) {
+            if (alarmDescriptorBuilder_.isEmpty()) {
+              alarmDescriptorBuilder_.dispose();
+              alarmDescriptorBuilder_ = null;
+              alarmDescriptor_ = other.alarmDescriptor_;
              bitField0_ = (bitField0_ & ~0x00000001);
-              alarmListBuilder_ =
+              alarmDescriptorBuilder_ =
                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
-                   getAlarmListFieldBuilder() : null;
+                   getAlarmDescriptorFieldBuilder() : null;
            } else {
-              alarmListBuilder_.addAllMessages(other.alarmList_);
+              alarmDescriptorBuilder_.addAllMessages(other.alarmDescriptor_);
            }
          }
        }
@@ -18887,11 +20939,11 @@ public final class Monitoring {
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
-        monitoring.Monitoring.AlarmIDList parsedMessage = null;
+        monitoring.Monitoring.AlarmList parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (monitoring.Monitoring.AlarmIDList) e.getUnfinishedMessage();
+          parsedMessage = (monitoring.Monitoring.AlarmList) e.getUnfinishedMessage();
          throw e.unwrapIOException();
        } finally {
          if (parsedMessage != null) {
@@ -18902,244 +20954,244 @@ public final class Monitoring {
        }
        private int bitField0_;
-      private java.util.List<monitoring.Monitoring.AlarmID> alarmList_ =
+      private java.util.List<monitoring.Monitoring.AlarmDescriptor> alarmDescriptor_ =
          java.util.Collections.emptyList();
-      private void ensureAlarmListIsMutable() {
+      private void ensureAlarmDescriptorIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
-          alarmList_ = new java.util.ArrayList<monitoring.Monitoring.AlarmID>(alarmList_);
+          alarmDescriptor_ = new java.util.ArrayList<monitoring.Monitoring.AlarmDescriptor>(alarmDescriptor_);
          bitField0_ |= 0x00000001;
        }
      }
      private com.google.protobuf.RepeatedFieldBuilderV3<
-          monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder> alarmListBuilder_;
+          monitoring.Monitoring.AlarmDescriptor, monitoring.Monitoring.AlarmDescriptor.Builder, monitoring.Monitoring.AlarmDescriptorOrBuilder> alarmDescriptorBuilder_;
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public java.util.List<monitoring.Monitoring.AlarmID> getAlarmListList() {
-        if (alarmListBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(alarmList_);
+      public java.util.List<monitoring.Monitoring.AlarmDescriptor> getAlarmDescriptorList() {
+        if (alarmDescriptorBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(alarmDescriptor_);
        } else {
-          return alarmListBuilder_.getMessageList();
+          return alarmDescriptorBuilder_.getMessageList();
        }
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public int getAlarmListCount() {
-        if (alarmListBuilder_ == null) {
-          return alarmList_.size();
+      public int getAlarmDescriptorCount() {
+        if (alarmDescriptorBuilder_ == null) {
+          return alarmDescriptor_.size();
        } else {
-          return alarmListBuilder_.getCount();
+          return alarmDescriptorBuilder_.getCount();
        }
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public monitoring.Monitoring.AlarmID getAlarmList(int index) {
-        if (alarmListBuilder_ == null) {
-          return alarmList_.get(index);
+      public monitoring.Monitoring.AlarmDescriptor getAlarmDescriptor(int index) {
+        if (alarmDescriptorBuilder_ == null) {
+          return alarmDescriptor_.get(index);
        } else {
-          return alarmListBuilder_.getMessage(index);
+          return alarmDescriptorBuilder_.getMessage(index);
        }
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder setAlarmList(
-          int index, monitoring.Monitoring.AlarmID value) {
-        if (alarmListBuilder_ == null) {
+      public Builder setAlarmDescriptor(
+          int index, monitoring.Monitoring.AlarmDescriptor value) {
+        if (alarmDescriptorBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
-          ensureAlarmListIsMutable();
-          alarmList_.set(index, value);
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.set(index, value);
          onChanged();
        } else {
-          alarmListBuilder_.setMessage(index, value);
+          alarmDescriptorBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder setAlarmList(
-          int index, monitoring.Monitoring.AlarmID.Builder builderForValue) {
-        if (alarmListBuilder_ == null) {
-          ensureAlarmListIsMutable();
-          alarmList_.set(index, builderForValue.build());
+      public Builder setAlarmDescriptor(
+          int index, monitoring.Monitoring.AlarmDescriptor.Builder builderForValue) {
+        if (alarmDescriptorBuilder_ == null) {
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.set(index, builderForValue.build());
          onChanged();
        } else {
-          alarmListBuilder_.setMessage(index, builderForValue.build());
+          alarmDescriptorBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder addAlarmList(monitoring.Monitoring.AlarmID value) {
-        if (alarmListBuilder_ == null) {
+      public Builder addAlarmDescriptor(monitoring.Monitoring.AlarmDescriptor value) {
+        if (alarmDescriptorBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
-          ensureAlarmListIsMutable();
-          alarmList_.add(value);
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.add(value);
          onChanged();
        } else {
-          alarmListBuilder_.addMessage(value);
+          alarmDescriptorBuilder_.addMessage(value);
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder addAlarmList(
-          int index, monitoring.Monitoring.AlarmID value) {
-        if (alarmListBuilder_ == null) {
+      public Builder addAlarmDescriptor(
+          int index, monitoring.Monitoring.AlarmDescriptor value) {
+        if (alarmDescriptorBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
-          ensureAlarmListIsMutable();
-          alarmList_.add(index, value);
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.add(index, value);
          onChanged();
        } else {
-          alarmListBuilder_.addMessage(index, value);
+          alarmDescriptorBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder addAlarmList(
-          monitoring.Monitoring.AlarmID.Builder builderForValue) {
-        if (alarmListBuilder_ == null) {
-          ensureAlarmListIsMutable();
-          alarmList_.add(builderForValue.build());
+      public Builder addAlarmDescriptor(
+          monitoring.Monitoring.AlarmDescriptor.Builder builderForValue) {
+        if (alarmDescriptorBuilder_ == null) {
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.add(builderForValue.build());
          onChanged();
        } else {
-          alarmListBuilder_.addMessage(builderForValue.build());
+          alarmDescriptorBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder addAlarmList(
-          int index, monitoring.Monitoring.AlarmID.Builder builderForValue) {
-        if (alarmListBuilder_ == null) {
-          ensureAlarmListIsMutable();
-          alarmList_.add(index, builderForValue.build());
+      public Builder addAlarmDescriptor(
+          int index, monitoring.Monitoring.AlarmDescriptor.Builder builderForValue) {
+        if (alarmDescriptorBuilder_ == null) {
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.add(index, builderForValue.build());
          onChanged();
        } else {
-          alarmListBuilder_.addMessage(index, builderForValue.build());
+          alarmDescriptorBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder addAllAlarmList(
-          java.lang.Iterable<? extends monitoring.Monitoring.AlarmID> values) {
-        if (alarmListBuilder_ == null) {
-          ensureAlarmListIsMutable();
+      public Builder addAllAlarmDescriptor(
+          java.lang.Iterable<? extends monitoring.Monitoring.AlarmDescriptor> values) {
+        if (alarmDescriptorBuilder_ == null) {
+          ensureAlarmDescriptorIsMutable();
          com.google.protobuf.AbstractMessageLite.Builder.addAll(
-              values, alarmList_);
+              values, alarmDescriptor_);
          onChanged();
        } else {
-          alarmListBuilder_.addAllMessages(values);
+          alarmDescriptorBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder clearAlarmList() {
-        if (alarmListBuilder_ == null) {
-          alarmList_ = java.util.Collections.emptyList();
+      public Builder clearAlarmDescriptor() {
+        if (alarmDescriptorBuilder_ == null) {
+          alarmDescriptor_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
-          alarmListBuilder_.clear();
+          alarmDescriptorBuilder_.clear();
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public Builder removeAlarmList(int index) {
-        if (alarmListBuilder_ == null) {
-          ensureAlarmListIsMutable();
-          alarmList_.remove(index);
+      public Builder removeAlarmDescriptor(int index) {
+        if (alarmDescriptorBuilder_ == null) {
+          ensureAlarmDescriptorIsMutable();
+          alarmDescriptor_.remove(index);
          onChanged();
        } else {
-          alarmListBuilder_.remove(index);
+          alarmDescriptorBuilder_.remove(index);
        }
        return this;
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public monitoring.Monitoring.AlarmID.Builder getAlarmListBuilder(
+      public monitoring.Monitoring.AlarmDescriptor.Builder getAlarmDescriptorBuilder(
          int index) {
-        return getAlarmListFieldBuilder().getBuilder(index);
+        return getAlarmDescriptorFieldBuilder().getBuilder(index);
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public monitoring.Monitoring.AlarmIDOrBuilder getAlarmListOrBuilder(
+      public monitoring.Monitoring.AlarmDescriptorOrBuilder getAlarmDescriptorOrBuilder(
          int index) {
-        if (alarmListBuilder_ == null) {
-          return alarmList_.get(index);  } else {
-          return alarmListBuilder_.getMessageOrBuilder(index);
+        if (alarmDescriptorBuilder_ == null) {
+          return alarmDescriptor_.get(index);  } else {
+          return alarmDescriptorBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public java.util.List<? extends monitoring.Monitoring.AlarmIDOrBuilder>
-           getAlarmListOrBuilderList() {
-        if (alarmListBuilder_ != null) {
-          return alarmListBuilder_.getMessageOrBuilderList();
+      public java.util.List<? extends monitoring.Monitoring.AlarmDescriptorOrBuilder>
+           getAlarmDescriptorOrBuilderList() {
+        if (alarmDescriptorBuilder_ != null) {
+          return alarmDescriptorBuilder_.getMessageOrBuilderList();
        } else {
-          return java.util.Collections.unmodifiableList(alarmList_);
+          return java.util.Collections.unmodifiableList(alarmDescriptor_);
        }
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public monitoring.Monitoring.AlarmID.Builder addAlarmListBuilder() {
-        return getAlarmListFieldBuilder().addBuilder(
-            monitoring.Monitoring.AlarmID.getDefaultInstance());
+      public monitoring.Monitoring.AlarmDescriptor.Builder addAlarmDescriptorBuilder() {
+        return getAlarmDescriptorFieldBuilder().addBuilder(
+            monitoring.Monitoring.AlarmDescriptor.getDefaultInstance());
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public monitoring.Monitoring.AlarmID.Builder addAlarmListBuilder(
+      public monitoring.Monitoring.AlarmDescriptor.Builder addAlarmDescriptorBuilder(
          int index) {
-        return getAlarmListFieldBuilder().addBuilder(
-            index, monitoring.Monitoring.AlarmID.getDefaultInstance());
+        return getAlarmDescriptorFieldBuilder().addBuilder(
+            index, monitoring.Monitoring.AlarmDescriptor.getDefaultInstance());
      }
      /**
-       * <code>repeated .monitoring.AlarmID alarm_list = 1;</code>
+       * <code>repeated .monitoring.AlarmDescriptor alarm_descriptor = 1;</code>
       */
-      public java.util.List<monitoring.Monitoring.AlarmID.Builder>
-           getAlarmListBuilderList() {
-        return getAlarmListFieldBuilder().getBuilderList();
+      public java.util.List<monitoring.Monitoring.AlarmDescriptor.Builder>
+           getAlarmDescriptorBuilderList() {
+        return getAlarmDescriptorFieldBuilder().getBuilderList();
      }
      private com.google.protobuf.RepeatedFieldBuilderV3<
-          monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder>
-          getAlarmListFieldBuilder() {
-        if (alarmListBuilder_ == null) {
-          alarmListBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
-              monitoring.Monitoring.AlarmID, monitoring.Monitoring.AlarmID.Builder, monitoring.Monitoring.AlarmIDOrBuilder>(
-                  alarmList_,
+          monitoring.Monitoring.AlarmDescriptor, monitoring.Monitoring.AlarmDescriptor.Builder, monitoring.Monitoring.AlarmDescriptorOrBuilder>
+          getAlarmDescriptorFieldBuilder() {
+        if (alarmDescriptorBuilder_ == null) {
+          alarmDescriptorBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
+              monitoring.Monitoring.AlarmDescriptor, monitoring.Monitoring.AlarmDescriptor.Builder, monitoring.Monitoring.AlarmDescriptorOrBuilder>(
+                  alarmDescriptor_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
-          alarmList_ = null;
+          alarmDescriptor_ = null;
        }
-        return alarmListBuilder_;
+        return alarmDescriptorBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
@@ -19154,41 +21206,41 @@ public final class Monitoring {
    }

-    // @@protoc_insertion_point(builder_scope:monitoring.AlarmIDList)
+    // @@protoc_insertion_point(builder_scope:monitoring.AlarmList)
  }

-  // @@protoc_insertion_point(class_scope:monitoring.AlarmIDList)
-  private static final monitoring.Monitoring.AlarmIDList DEFAULT_INSTANCE;
+  // @@protoc_insertion_point(class_scope:monitoring.AlarmList)
+  private static final monitoring.Monitoring.AlarmList DEFAULT_INSTANCE;
  static {
-    DEFAULT_INSTANCE = new monitoring.Monitoring.AlarmIDList();
+    DEFAULT_INSTANCE = new monitoring.Monitoring.AlarmList();
  }

-  public static monitoring.Monitoring.AlarmIDList getDefaultInstance() {
+  public static monitoring.Monitoring.AlarmList getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

-  private static final com.google.protobuf.Parser<AlarmIDList>
-      PARSER = new com.google.protobuf.AbstractParser<AlarmIDList>() {
+  private static final com.google.protobuf.Parser<AlarmList>
+      PARSER = new com.google.protobuf.AbstractParser<AlarmList>() {
    @java.lang.Override
-    public AlarmIDList parsePartialFrom(
+    public AlarmList parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
-      return new AlarmIDList(input, extensionRegistry);
+      return new AlarmList(input, extensionRegistry);
    }
  };

-  public static com.google.protobuf.Parser<AlarmIDList> parser() {
+  public static com.google.protobuf.Parser<AlarmList> parser() {
    return PARSER;
  }

  @java.lang.Override
-  public com.google.protobuf.Parser<AlarmIDList> getParserForType() {
+  public com.google.protobuf.Parser<AlarmList> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
-  public monitoring.Monitoring.AlarmIDList getDefaultInstanceForType() {
+  public monitoring.Monitoring.AlarmList getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }

@@ -19209,6 +21261,21 @@ public final class Monitoring {
  private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_monitoring_KpiQuery_fieldAccessorTable;
+  private static final com.google.protobuf.Descriptors.Descriptor
+    internal_static_monitoring_RawKpi_descriptor;
+  private static final
+    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_monitoring_RawKpi_fieldAccessorTable;
+  private static final com.google.protobuf.Descriptors.Descriptor
+    internal_static_monitoring_RawKpiList_descriptor;
+  private static final
+    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_monitoring_RawKpiList_fieldAccessorTable;
+  private static final com.google.protobuf.Descriptors.Descriptor
+    internal_static_monitoring_RawKpiTable_descriptor;
+  private static final
+    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_monitoring_RawKpiTable_fieldAccessorTable;
  private static final com.google.protobuf.Descriptors.Descriptor
    internal_static_monitoring_KpiId_descriptor;
  private static final
@@ -19255,10 +21322,10 @@ public final class Monitoring {
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_monitoring_SubsResponse_fieldAccessorTable;
  private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_monitoring_SubsIDList_descriptor;
+    internal_static_monitoring_SubsList_descriptor;
  private static final
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-    internal_static_monitoring_SubsIDList_fieldAccessorTable;
+    internal_static_monitoring_SubsList_fieldAccessorTable;
  private static final com.google.protobuf.Descriptors.Descriptor
    internal_static_monitoring_AlarmDescriptor_descriptor;
  private static final
@@ -19280,10 +21347,10 @@ public final class Monitoring {
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_monitoring_AlarmResponse_fieldAccessorTable;
  private static final com.google.protobuf.Descriptors.Descriptor
-    internal_static_monitoring_AlarmIDList_descriptor;
+    internal_static_monitoring_AlarmList_descriptor;
  private static final
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-    internal_static_monitoring_AlarmIDList_fieldAccessorTable;
+    internal_static_monitoring_AlarmList_fieldAccessorTable;

  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
@@ -19294,7 +21361,7 @@ public final class Monitoring {
  static {
    java.lang.String[] descriptorData = {
      "\n\020monitoring.proto\022\nmonitoring\032\rcontext." +
-      "proto\032\026kpi_sample_types.proto\"\311\002\n\rKpiDes" +
+      "proto\032\026kpi_sample_types.proto\"\367\002\n\rKpiDes" +
      "criptor\022!\n\006kpi_id\030\001 \001(\0132\021.monitoring.Kpi" +
      "Id\022\027\n\017kpi_description\030\002 \001(\t\022&\n\013kpi_id_li" +
      "st\030\003 \003(\0132\021.monitoring.KpiId\0228\n\017kpi_sampl" +
@@ -19303,86 +21370,92 @@ public final class Monitoring {
      "eId\022(\n\013endpoint_id\030\006 \001(\0132\023.context.EndPo" +
      "intId\022&\n\nservice_id\030\007 \001(\0132\022.context.Serv" +
      "iceId\022\"\n\010slice_id\030\010 \001(\0132\020.context.SliceI" +
-      "d\"l\n\021MonitorKpiRequest\022!\n\006kpi_id\030\001 \001(\0132\021" +
-      ".monitoring.KpiId\022\033\n\023monitoring_window_s" +
-      "\030\002 \001(\002\022\027\n\017sampling_rate_s\030\003 \001(\002\"\323\001\n\010KpiQ" +
-      "uery\022!\n\006kpi_id\030\001 \003(\0132\021.monitoring.KpiId\022" +
-      "\033\n\023monitoring_window_s\030\002 \001(\002\022\027\n\017sampling" +
-      "_rate_s\030\003 \001(\002\022\026\n\016last_n_samples\030\004 \001(\r\022+\n" +
-      "\017start_timestamp\030\005 \001(\0132\022.context.Timesta" +
-      "mp\022)\n\rend_timestamp\030\006 \001(\0132\022.context.Time" +
-      "stamp\"&\n\005KpiId\022\035\n\006kpi_id\030\001 \001(\0132\r.context" +
-      ".Uuid\"x\n\003Kpi\022!\n\006kpi_id\030\001 \001(\0132\021.monitorin" +
-      "g.KpiId\022%\n\ttimestamp\030\002 \001(\0132\022.context.Tim" +
-      "estamp\022\'\n\tkpi_value\030\003 \001(\0132\024.monitoring.K" +
-      "piValue\"\250\001\n\rKpiValueRange\022)\n\013kpiMinValue" +
-      "\030\001 \001(\0132\024.monitoring.KpiValue\022)\n\013kpiMaxVa" +
-      "lue\030\002 \001(\0132\024.monitoring.KpiValue\022\017\n\007inRan" +
-      "ge\030\003 \001(\010\022\027\n\017includeMinValue\030\004 \001(\010\022\027\n\017inc" +
-      "ludeMaxValue\030\005 \001(\010\"\241\001\n\010KpiValue\022\022\n\010int32" +
-      "Val\030\001 \001(\005H\000\022\023\n\tuint32Val\030\002 \001(\rH\000\022\022\n\010int6" +
-      "4Val\030\003 \001(\003H\000\022\023\n\tuint64Val\030\004 \001(\004H\000\022\022\n\010flo" +
-      "atVal\030\005 \001(\002H\000\022\023\n\tstringVal\030\006 \001(\tH\000\022\021\n\007bo" +
-      "olVal\030\007 \001(\010H\000B\007\n\005value\",\n\007KpiList\022!\n\010kpi" +
-      "_list\030\001 \003(\0132\017.monitoring.Kpi\"K\n\021KpiDescr" +
-      "iptorList\0226\n\023kpi_descriptor_list\030\001 \003(\0132\031" +
-      ".monitoring.KpiDescriptor\"\362\001\n\016SubsDescri" +
-      "ptor\022+\n\007subs_id\030\001 \001(\0132\032.monitoring.Subsc" +
-      "riptionID\022!\n\006kpi_id\030\002 \001(\0132\021.monitoring.K" +
-      "piId\022\033\n\023sampling_duration_s\030\003 \001(\002\022\033\n\023sam" +
"pling_interval_s\030\004 \001(\002\022+\n\017start_timestam" + - "p\030\005 \001(\0132\022.context.Timestamp\022)\n\rend_times" + - "tamp\030\006 \001(\0132\022.context.Timestamp\"0\n\016Subscr" + - "iptionID\022\036\n\007subs_id\030\001 \001(\0132\r.context.Uuid" + - "\"b\n\014SubsResponse\022+\n\007subs_id\030\001 \001(\0132\032.moni" + - "toring.SubscriptionID\022%\n\010kpi_list\030\002 \003(\0132" + - "\023.monitoring.KpiList\";\n\nSubsIDList\022-\n\tsu" + - "bs_list\030\001 \003(\0132\032.monitoring.SubscriptionI" + - "D\"\337\001\n\017AlarmDescriptor\022%\n\010alarm_id\030\001 \001(\0132" + - "\023.monitoring.AlarmID\022\031\n\021alarm_descriptio" + - "n\030\002 \001(\t\022\014\n\004name\030\003 \001(\t\022!\n\006kpi_id\030\004 \003(\0132\021." + - "monitoring.KpiId\0222\n\017kpi_value_range\030\005 \003(" + - "\0132\031.monitoring.KpiValueRange\022%\n\ttimestam" + - "p\030\006 \001(\0132\022.context.Timestamp\"*\n\007AlarmID\022\037" + - "\n\010alarm_id\030\001 \001(\0132\r.context.Uuid\"|\n\021Alarm" + - "Subscription\022$\n\007alarmID\030\001 \001(\0132\023.monitori" + - "ng.AlarmID\022\036\n\026subscription_timeout_s\030\002 \001" + - "(\002\022!\n\031subscription_frequency_ms\030\003 \001(\002\"\224\001" + - "\n\rAlarmResponse\022%\n\010alarm_id\030\001 \001(\0132\023.moni" + - "toring.AlarmID\022\014\n\004text\030\002 \001(\t\022\'\n\tkpi_valu" + - "e\030\003 \001(\0132\024.monitoring.KpiValue\022%\n\ttimesta" + - "mp\030\004 \001(\0132\022.context.Timestamp\"6\n\013AlarmIDL" + - "ist\022\'\n\nalarm_list\030\001 \003(\0132\023.monitoring.Ala" + - "rmID2\233\t\n\021MonitoringService\0228\n\006SetKpi\022\031.m" + - "onitoring.KpiDescriptor\032\021.monitoring.Kpi" + - "Id\"\000\0220\n\tDeleteKpi\022\021.monitoring.KpiId\032\016.c" + - "ontext.Empty\"\000\022B\n\020GetKpiDescriptor\022\021.mon" + - "itoring.KpiId\032\031.monitoring.KpiDescriptor" + - "\"\000\022G\n\024GetKpiDescriptorList\022\016.context.Emp" + - "ty\032\035.monitoring.KpiDescriptorList\"\000\022/\n\nI" + - "ncludeKpi\022\017.monitoring.Kpi\032\016.context.Emp" + - "ty\"\000\022=\n\nMonitorKpi\022\035.monitoring.MonitorK" + - "piRequest\032\016.context.Empty\"\000\022;\n\014QueryKpiD" + - "ata\022\024.monitoring.KpiQuery\032\023.monitoring.K" + - "piList\"\000\022I\n\022SetKpiSubscription\022\032.monitor" + - "ing.SubsDescriptor\032\023.monitoring.KpiList\"" + + "d\022,\n\rconnection_id\030\t \001(\0132\025.context.Conne" + + "ctionId\"l\n\021MonitorKpiRequest\022!\n\006kpi_id\030\001" + + " \001(\0132\021.monitoring.KpiId\022\033\n\023monitoring_wi" + + "ndow_s\030\002 \001(\002\022\027\n\017sampling_rate_s\030\003 \001(\002\"\273\001" + + "\n\010KpiQuery\022\"\n\007kpi_ids\030\001 \003(\0132\021.monitoring" + + ".KpiId\022\033\n\023monitoring_window_s\030\002 \001(\002\022\026\n\016l" + + "ast_n_samples\030\003 \001(\r\022+\n\017start_timestamp\030\004" + + " \001(\0132\022.context.Timestamp\022)\n\rend_timestam" + + "p\030\005 \001(\0132\022.context.Timestamp\"X\n\006RawKpi\022%\n" + + "\ttimestamp\030\001 \001(\0132\022.context.Timestamp\022\'\n\t" + + "kpi_value\030\002 \001(\0132\024.monitoring.KpiValue\"U\n" + + "\nRawKpiList\022!\n\006kpi_id\030\001 \001(\0132\021.monitoring" + + ".KpiId\022$\n\010raw_kpis\030\002 \003(\0132\022.monitoring.Ra" + + "wKpi\"<\n\013RawKpiTable\022-\n\rraw_kpi_lists\030\001 \003" + + "(\0132\026.monitoring.RawKpiList\"&\n\005KpiId\022\035\n\006k" + + "pi_id\030\001 \001(\0132\r.context.Uuid\"x\n\003Kpi\022!\n\006kpi" + + "_id\030\001 
\001(\0132\021.monitoring.KpiId\022%\n\ttimestam" + + "p\030\002 \001(\0132\022.context.Timestamp\022\'\n\tkpi_value" + + "\030\003 \001(\0132\024.monitoring.KpiValue\"\250\001\n\rKpiValu" + + "eRange\022)\n\013kpiMinValue\030\001 \001(\0132\024.monitoring" + + ".KpiValue\022)\n\013kpiMaxValue\030\002 \001(\0132\024.monitor" + + "ing.KpiValue\022\017\n\007inRange\030\003 \001(\010\022\027\n\017include" + + "MinValue\030\004 \001(\010\022\027\n\017includeMaxValue\030\005 \001(\010\"" + + "\241\001\n\010KpiValue\022\022\n\010int32Val\030\001 \001(\005H\000\022\023\n\tuint" + + "32Val\030\002 \001(\rH\000\022\022\n\010int64Val\030\003 \001(\003H\000\022\023\n\tuin" + + "t64Val\030\004 \001(\004H\000\022\022\n\010floatVal\030\005 \001(\002H\000\022\023\n\tst" + + "ringVal\030\006 \001(\tH\000\022\021\n\007boolVal\030\007 \001(\010H\000B\007\n\005va" + + "lue\"\'\n\007KpiList\022\034\n\003kpi\030\001 \003(\0132\017.monitoring" + + ".Kpi\"K\n\021KpiDescriptorList\0226\n\023kpi_descrip" + + "tor_list\030\001 \003(\0132\031.monitoring.KpiDescripto" + + "r\"\362\001\n\016SubsDescriptor\022+\n\007subs_id\030\001 \001(\0132\032." + + "monitoring.SubscriptionID\022!\n\006kpi_id\030\002 \001(" + + "\0132\021.monitoring.KpiId\022\033\n\023sampling_duratio" + + "n_s\030\003 \001(\002\022\033\n\023sampling_interval_s\030\004 \001(\002\022+" + + "\n\017start_timestamp\030\005 \001(\0132\022.context.Timest" + + "amp\022)\n\rend_timestamp\030\006 \001(\0132\022.context.Tim" + + "estamp\"0\n\016SubscriptionID\022\036\n\007subs_id\030\001 \001(" + + "\0132\r.context.Uuid\"b\n\014SubsResponse\022+\n\007subs" + + "_id\030\001 \001(\0132\032.monitoring.SubscriptionID\022%\n" + + "\010kpi_list\030\002 \001(\0132\023.monitoring.KpiList\"?\n\010" + + "SubsList\0223\n\017subs_descriptor\030\001 \003(\0132\032.moni" + + "toring.SubsDescriptor\"\337\001\n\017AlarmDescripto" + + "r\022%\n\010alarm_id\030\001 \001(\0132\023.monitoring.AlarmID" + + "\022\031\n\021alarm_description\030\002 \001(\t\022\014\n\004name\030\003 \001(" + + "\t\022!\n\006kpi_id\030\004 \001(\0132\021.monitoring.KpiId\0222\n\017" + + "kpi_value_range\030\005 \001(\0132\031.monitoring.KpiVa" + + "lueRange\022%\n\ttimestamp\030\006 \001(\0132\022.context.Ti" + + "mestamp\"*\n\007AlarmID\022\037\n\010alarm_id\030\001 \001(\0132\r.c" + + "ontext.Uuid\"}\n\021AlarmSubscription\022%\n\010alar" + + "m_id\030\001 \001(\0132\023.monitoring.AlarmID\022\036\n\026subsc" + + "ription_timeout_s\030\002 \001(\002\022!\n\031subscription_" + + "frequency_ms\030\003 \001(\002\"k\n\rAlarmResponse\022%\n\010a" + + "larm_id\030\001 \001(\0132\023.monitoring.AlarmID\022\014\n\004te" + + "xt\030\002 \001(\t\022%\n\010kpi_list\030\003 \001(\0132\023.monitoring." 
+ + "KpiList\"B\n\tAlarmList\0225\n\020alarm_descriptor" + + "\030\001 \003(\0132\033.monitoring.AlarmDescriptor2\234\t\n\021" + + "MonitoringService\0228\n\006SetKpi\022\031.monitoring" + + ".KpiDescriptor\032\021.monitoring.KpiId\"\000\0220\n\tD" + + "eleteKpi\022\021.monitoring.KpiId\032\016.context.Em" + + "pty\"\000\022B\n\020GetKpiDescriptor\022\021.monitoring.K" + + "piId\032\031.monitoring.KpiDescriptor\"\000\022G\n\024Get" + + "KpiDescriptorList\022\016.context.Empty\032\035.moni" + + "toring.KpiDescriptorList\"\000\022/\n\nIncludeKpi" + + "\022\017.monitoring.Kpi\032\016.context.Empty\"\000\022=\n\nM" + + "onitorKpi\022\035.monitoring.MonitorKpiRequest" + + "\032\016.context.Empty\"\000\022?\n\014QueryKpiData\022\024.mon" + + "itoring.KpiQuery\032\027.monitoring.RawKpiTabl" + + "e\"\000\022N\n\022SetKpiSubscription\022\032.monitoring.S" + + "ubsDescriptor\032\030.monitoring.SubsResponse\"" + "\0000\001\022M\n\021GetSubsDescriptor\022\032.monitoring.Su" + "bscriptionID\032\032.monitoring.SubsDescriptor" + - "\"\000\022<\n\020GetSubscriptions\022\016.context.Empty\032\026" + - ".monitoring.SubsIDList\"\000\022B\n\022DeleteSubscr" + - "iption\022\032.monitoring.SubscriptionID\032\016.con" + - "text.Empty\"\000\022A\n\013SetKpiAlarm\022\033.monitoring" + - ".AlarmDescriptor\032\023.monitoring.AlarmID\"\000\022" + - "6\n\tGetAlarms\022\016.context.Empty\032\027.monitorin" + - "g.AlarmIDList\"\000\022H\n\022GetAlarmDescriptor\022\023." + - "monitoring.AlarmID\032\033.monitoring.AlarmDes" + - "criptor\"\000\022V\n\026GetAlarmResponseStream\022\035.mo" + - "nitoring.AlarmSubscription\032\031.monitoring." + - "AlarmResponse\"\0000\001\0224\n\013DeleteAlarm\022\023.monit" + - "oring.AlarmID\032\016.context.Empty\"\000\0226\n\014GetSt" + - "reamKpi\022\021.monitoring.KpiId\032\017.monitoring." + - "Kpi\"\0000\001\0229\n\rGetInstantKpi\022\021.monitoring.Kp" + - "iId\032\023.monitoring.KpiList\"\000b\006proto3" + "\"\000\022:\n\020GetSubscriptions\022\016.context.Empty\032\024" + + ".monitoring.SubsList\"\000\022B\n\022DeleteSubscrip" + + "tion\022\032.monitoring.SubscriptionID\032\016.conte" + + "xt.Empty\"\000\022A\n\013SetKpiAlarm\022\033.monitoring.A" + + "larmDescriptor\032\023.monitoring.AlarmID\"\000\0224\n" + + "\tGetAlarms\022\016.context.Empty\032\025.monitoring." 
+ + "AlarmList\"\000\022H\n\022GetAlarmDescriptor\022\023.moni" + + "toring.AlarmID\032\033.monitoring.AlarmDescrip" + + "tor\"\000\022V\n\026GetAlarmResponseStream\022\035.monito" + + "ring.AlarmSubscription\032\031.monitoring.Alar" + + "mResponse\"\0000\001\0224\n\013DeleteAlarm\022\023.monitorin" + + "g.AlarmID\032\016.context.Empty\"\000\0226\n\014GetStream" + + "Kpi\022\021.monitoring.KpiId\032\017.monitoring.Kpi\"" + + "\0000\001\0225\n\rGetInstantKpi\022\021.monitoring.KpiId\032" + + "\017.monitoring.Kpi\"\000b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -19395,7 +21468,7 @@ public final class Monitoring { internal_static_monitoring_KpiDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_monitoring_KpiDescriptor_descriptor, - new java.lang.String[] { "KpiId", "KpiDescription", "KpiIdList", "KpiSampleType", "DeviceId", "EndpointId", "ServiceId", "SliceId", }); + new java.lang.String[] { "KpiId", "KpiDescription", "KpiIdList", "KpiSampleType", "DeviceId", "EndpointId", "ServiceId", "SliceId", "ConnectionId", }); internal_static_monitoring_MonitorKpiRequest_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_monitoring_MonitorKpiRequest_fieldAccessorTable = new @@ -19407,97 +21480,115 @@ public final class Monitoring { internal_static_monitoring_KpiQuery_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_monitoring_KpiQuery_descriptor, - new java.lang.String[] { "KpiId", "MonitoringWindowS", "SamplingRateS", "LastNSamples", "StartTimestamp", "EndTimestamp", }); - internal_static_monitoring_KpiId_descriptor = + new java.lang.String[] { "KpiIds", "MonitoringWindowS", "LastNSamples", "StartTimestamp", "EndTimestamp", }); + internal_static_monitoring_RawKpi_descriptor = getDescriptor().getMessageTypes().get(3); + internal_static_monitoring_RawKpi_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_monitoring_RawKpi_descriptor, + new java.lang.String[] { "Timestamp", "KpiValue", }); + internal_static_monitoring_RawKpiList_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_monitoring_RawKpiList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_monitoring_RawKpiList_descriptor, + new java.lang.String[] { "KpiId", "RawKpis", }); + internal_static_monitoring_RawKpiTable_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_monitoring_RawKpiTable_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_monitoring_RawKpiTable_descriptor, + new java.lang.String[] { "RawKpiLists", }); + internal_static_monitoring_KpiId_descriptor = + getDescriptor().getMessageTypes().get(6); internal_static_monitoring_KpiId_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_monitoring_KpiId_descriptor, new java.lang.String[] { "KpiId", }); internal_static_monitoring_Kpi_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(7); internal_static_monitoring_Kpi_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_monitoring_Kpi_descriptor, new java.lang.String[] { "KpiId", "Timestamp", "KpiValue", }); internal_static_monitoring_KpiValueRange_descriptor = - 
-      getDescriptor().getMessageTypes().get(5);
+      getDescriptor().getMessageTypes().get(8);
    internal_static_monitoring_KpiValueRange_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_KpiValueRange_descriptor,
        new java.lang.String[] { "KpiMinValue", "KpiMaxValue", "InRange", "IncludeMinValue", "IncludeMaxValue", });
    internal_static_monitoring_KpiValue_descriptor =
-      getDescriptor().getMessageTypes().get(6);
+      getDescriptor().getMessageTypes().get(9);
    internal_static_monitoring_KpiValue_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_KpiValue_descriptor,
        new java.lang.String[] { "Int32Val", "Uint32Val", "Int64Val", "Uint64Val", "FloatVal", "StringVal", "BoolVal", "Value", });
    internal_static_monitoring_KpiList_descriptor =
-      getDescriptor().getMessageTypes().get(7);
+      getDescriptor().getMessageTypes().get(10);
    internal_static_monitoring_KpiList_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_KpiList_descriptor,
-        new java.lang.String[] { "KpiList", });
+        new java.lang.String[] { "Kpi", });
    internal_static_monitoring_KpiDescriptorList_descriptor =
-      getDescriptor().getMessageTypes().get(8);
+      getDescriptor().getMessageTypes().get(11);
    internal_static_monitoring_KpiDescriptorList_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_KpiDescriptorList_descriptor,
        new java.lang.String[] { "KpiDescriptorList", });
    internal_static_monitoring_SubsDescriptor_descriptor =
-      getDescriptor().getMessageTypes().get(9);
+      getDescriptor().getMessageTypes().get(12);
    internal_static_monitoring_SubsDescriptor_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_SubsDescriptor_descriptor,
        new java.lang.String[] { "SubsId", "KpiId", "SamplingDurationS", "SamplingIntervalS", "StartTimestamp", "EndTimestamp", });
    internal_static_monitoring_SubscriptionID_descriptor =
-      getDescriptor().getMessageTypes().get(10);
+      getDescriptor().getMessageTypes().get(13);
    internal_static_monitoring_SubscriptionID_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_SubscriptionID_descriptor,
        new java.lang.String[] { "SubsId", });
    internal_static_monitoring_SubsResponse_descriptor =
-      getDescriptor().getMessageTypes().get(11);
+      getDescriptor().getMessageTypes().get(14);
    internal_static_monitoring_SubsResponse_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_SubsResponse_descriptor,
        new java.lang.String[] { "SubsId", "KpiList", });
-    internal_static_monitoring_SubsIDList_descriptor =
-      getDescriptor().getMessageTypes().get(12);
-    internal_static_monitoring_SubsIDList_fieldAccessorTable = new
+    internal_static_monitoring_SubsList_descriptor =
+      getDescriptor().getMessageTypes().get(15);
+    internal_static_monitoring_SubsList_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_monitoring_SubsIDList_descriptor,
-        new java.lang.String[] { "SubsList", });
+        internal_static_monitoring_SubsList_descriptor,
+        new java.lang.String[] { "SubsDescriptor", });
    internal_static_monitoring_AlarmDescriptor_descriptor =
-      getDescriptor().getMessageTypes().get(13);
+      getDescriptor().getMessageTypes().get(16);
    internal_static_monitoring_AlarmDescriptor_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_AlarmDescriptor_descriptor,
        new java.lang.String[] { "AlarmId", "AlarmDescription", "Name", "KpiId", "KpiValueRange", "Timestamp", });
    internal_static_monitoring_AlarmID_descriptor =
-      getDescriptor().getMessageTypes().get(14);
+      getDescriptor().getMessageTypes().get(17);
    internal_static_monitoring_AlarmID_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_AlarmID_descriptor,
        new java.lang.String[] { "AlarmId", });
    internal_static_monitoring_AlarmSubscription_descriptor =
-      getDescriptor().getMessageTypes().get(15);
+      getDescriptor().getMessageTypes().get(18);
    internal_static_monitoring_AlarmSubscription_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_AlarmSubscription_descriptor,
-        new java.lang.String[] { "AlarmID", "SubscriptionTimeoutS", "SubscriptionFrequencyMs", });
+        new java.lang.String[] { "AlarmId", "SubscriptionTimeoutS", "SubscriptionFrequencyMs", });
    internal_static_monitoring_AlarmResponse_descriptor =
-      getDescriptor().getMessageTypes().get(16);
+      getDescriptor().getMessageTypes().get(19);
    internal_static_monitoring_AlarmResponse_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_monitoring_AlarmResponse_descriptor,
-        new java.lang.String[] { "AlarmId", "Text", "KpiValue", "Timestamp", });
-    internal_static_monitoring_AlarmIDList_descriptor =
-      getDescriptor().getMessageTypes().get(17);
-    internal_static_monitoring_AlarmIDList_fieldAccessorTable = new
+        new java.lang.String[] { "AlarmId", "Text", "KpiList", });
+    internal_static_monitoring_AlarmList_descriptor =
+      getDescriptor().getMessageTypes().get(20);
+    internal_static_monitoring_AlarmList_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_monitoring_AlarmIDList_descriptor,
-        new java.lang.String[] { "AlarmList", });
+        internal_static_monitoring_AlarmList_descriptor,
+        new java.lang.String[] { "AlarmDescriptor", });
    context.ContextOuterClass.getDescriptor();
    kpi_sample_types.KpiSampleTypes.getDescriptor();
  }
diff --git a/src/automation/target/generated-sources/grpc/monitoring/MonitoringService.java b/src/automation/target/generated-sources/grpc/monitoring/MonitoringService.java
index 6372600680d57d0b351e7dd67b88c84f9d8e8cff..0ce30559b2c0a1bb6236431482d6b99b82cf0842 100644
--- a/src/automation/target/generated-sources/grpc/monitoring/MonitoringService.java
+++ b/src/automation/target/generated-sources/grpc/monitoring/MonitoringService.java
@@ -20,26 +20,26 @@ public interface MonitoringService extends MutinyService {

    io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> monitorKpi(monitoring.Monitoring.MonitorKpiRequest request);

-    io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> queryKpiData(monitoring.Monitoring.KpiQuery request);
+    io.smallrye.mutiny.Uni<monitoring.Monitoring.RawKpiTable> queryKpiData(monitoring.Monitoring.KpiQuery request);

    io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsDescriptor> getSubsDescriptor(monitoring.Monitoring.SubscriptionID request);

-    io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsIDList> getSubscriptions(context.ContextOuterClass.Empty request);
+    io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsList> getSubscriptions(context.ContextOuterClass.Empty request);

    io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> deleteSubscription(monitoring.Monitoring.SubscriptionID request);
request); io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmID> setKpiAlarm(monitoring.Monitoring.AlarmDescriptor request); - io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmIDList> getAlarms(context.ContextOuterClass.Empty request); + io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmList> getAlarms(context.ContextOuterClass.Empty request); io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmDescriptor> getAlarmDescriptor(monitoring.Monitoring.AlarmID request); io.smallrye.mutiny.Uni<context.ContextOuterClass.Empty> deleteAlarm(monitoring.Monitoring.AlarmID request); - io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> getInstantKpi(monitoring.Monitoring.KpiId request); + io.smallrye.mutiny.Uni<monitoring.Monitoring.Kpi> getInstantKpi(monitoring.Monitoring.KpiId request); - io.smallrye.mutiny.Multi<monitoring.Monitoring.KpiList> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request); + io.smallrye.mutiny.Multi<monitoring.Monitoring.SubsResponse> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request); io.smallrye.mutiny.Multi<monitoring.Monitoring.AlarmResponse> getAlarmResponseStream(monitoring.Monitoring.AlarmSubscription request); diff --git a/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceBean.java b/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceBean.java index 21f7f48acd6b6870584133dc3d665f681e78cf5e..cbc984e7132bdbf22c9b99a510106c5c6f4cbda7 100644 --- a/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceBean.java +++ b/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceBean.java @@ -64,7 +64,7 @@ public class MonitoringServiceBean extends MutinyMonitoringServiceGrpc.Monitorin } } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> queryKpiData(monitoring.Monitoring.KpiQuery request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.RawKpiTable> queryKpiData(monitoring.Monitoring.KpiQuery request) { try { return delegate.queryKpiData(request); } catch (UnsupportedOperationException e) { @@ -80,7 +80,7 @@ public class MonitoringServiceBean extends MutinyMonitoringServiceGrpc.Monitorin } } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsIDList> getSubscriptions(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsList> getSubscriptions(context.ContextOuterClass.Empty request) { try { return delegate.getSubscriptions(request); } catch (UnsupportedOperationException e) { @@ -104,7 +104,7 @@ public class MonitoringServiceBean extends MutinyMonitoringServiceGrpc.Monitorin } } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmIDList> getAlarms(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmList> getAlarms(context.ContextOuterClass.Empty request) { try { return delegate.getAlarms(request); } catch (UnsupportedOperationException e) { @@ -128,7 +128,7 @@ public class MonitoringServiceBean extends MutinyMonitoringServiceGrpc.Monitorin } } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> getInstantKpi(monitoring.Monitoring.KpiId request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.Kpi> getInstantKpi(monitoring.Monitoring.KpiId request) { try { return delegate.getInstantKpi(request); } catch (UnsupportedOperationException e) { @@ -137,7 +137,7 @@ public class MonitoringServiceBean extends MutinyMonitoringServiceGrpc.Monitorin } @Override - public 
io.smallrye.mutiny.Multi<monitoring.Monitoring.KpiList> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { + public io.smallrye.mutiny.Multi<monitoring.Monitoring.SubsResponse> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { try { return delegate.setKpiSubscription(request); } catch (UnsupportedOperationException e) { diff --git a/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceClient.java b/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceClient.java index 6b6dc38645931ad94287b4151019c3b42a1c098d..0e8ff5d1b8929694b49548984cd7d53f9c8f68a4 100644 --- a/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceClient.java +++ b/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceClient.java @@ -45,7 +45,7 @@ public class MonitoringServiceClient implements MonitoringService, MutinyClient< return stub.monitorKpi(request); } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> queryKpiData(monitoring.Monitoring.KpiQuery request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.RawKpiTable> queryKpiData(monitoring.Monitoring.KpiQuery request) { return stub.queryKpiData(request); } @Override @@ -53,7 +53,7 @@ public class MonitoringServiceClient implements MonitoringService, MutinyClient< return stub.getSubsDescriptor(request); } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsIDList> getSubscriptions(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsList> getSubscriptions(context.ContextOuterClass.Empty request) { return stub.getSubscriptions(request); } @Override @@ -65,7 +65,7 @@ public class MonitoringServiceClient implements MonitoringService, MutinyClient< return stub.setKpiAlarm(request); } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmIDList> getAlarms(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmList> getAlarms(context.ContextOuterClass.Empty request) { return stub.getAlarms(request); } @Override @@ -77,12 +77,12 @@ public class MonitoringServiceClient implements MonitoringService, MutinyClient< return stub.deleteAlarm(request); } @Override - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> getInstantKpi(monitoring.Monitoring.KpiId request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.Kpi> getInstantKpi(monitoring.Monitoring.KpiId request) { return stub.getInstantKpi(request); } @Override - public io.smallrye.mutiny.Multi<monitoring.Monitoring.KpiList> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { + public io.smallrye.mutiny.Multi<monitoring.Monitoring.SubsResponse> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { return stub.setKpiSubscription(request); } diff --git a/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceGrpc.java b/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceGrpc.java index fe92a7814166b65b12db5d50bb4baaf525c59146..c5f55b3b44c03ea8f5377ce11e3c3e547da5ef06 100644 --- a/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceGrpc.java +++ b/src/automation/target/generated-sources/grpc/monitoring/MonitoringServiceGrpc.java @@ -201,28 +201,28 @@ public final class MonitoringServiceGrpc { } private static volatile io.grpc.MethodDescriptor<monitoring.Monitoring.KpiQuery, - monitoring.Monitoring.KpiList> getQueryKpiDataMethod; + 
monitoring.Monitoring.RawKpiTable> getQueryKpiDataMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "QueryKpiData", requestType = monitoring.Monitoring.KpiQuery.class, - responseType = monitoring.Monitoring.KpiList.class, + responseType = monitoring.Monitoring.RawKpiTable.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor<monitoring.Monitoring.KpiQuery, - monitoring.Monitoring.KpiList> getQueryKpiDataMethod() { - io.grpc.MethodDescriptor<monitoring.Monitoring.KpiQuery, monitoring.Monitoring.KpiList> getQueryKpiDataMethod; + monitoring.Monitoring.RawKpiTable> getQueryKpiDataMethod() { + io.grpc.MethodDescriptor<monitoring.Monitoring.KpiQuery, monitoring.Monitoring.RawKpiTable> getQueryKpiDataMethod; if ((getQueryKpiDataMethod = MonitoringServiceGrpc.getQueryKpiDataMethod) == null) { synchronized (MonitoringServiceGrpc.class) { if ((getQueryKpiDataMethod = MonitoringServiceGrpc.getQueryKpiDataMethod) == null) { MonitoringServiceGrpc.getQueryKpiDataMethod = getQueryKpiDataMethod = - io.grpc.MethodDescriptor.<monitoring.Monitoring.KpiQuery, monitoring.Monitoring.KpiList>newBuilder() + io.grpc.MethodDescriptor.<monitoring.Monitoring.KpiQuery, monitoring.Monitoring.RawKpiTable>newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "QueryKpiData")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( monitoring.Monitoring.KpiQuery.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( - monitoring.Monitoring.KpiList.getDefaultInstance())) + monitoring.Monitoring.RawKpiTable.getDefaultInstance())) .setSchemaDescriptor(new MonitoringServiceMethodDescriptorSupplier("QueryKpiData")) .build(); } @@ -232,28 +232,28 @@ public final class MonitoringServiceGrpc { } private static volatile io.grpc.MethodDescriptor<monitoring.Monitoring.SubsDescriptor, - monitoring.Monitoring.KpiList> getSetKpiSubscriptionMethod; + monitoring.Monitoring.SubsResponse> getSetKpiSubscriptionMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "SetKpiSubscription", requestType = monitoring.Monitoring.SubsDescriptor.class, - responseType = monitoring.Monitoring.KpiList.class, + responseType = monitoring.Monitoring.SubsResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) public static io.grpc.MethodDescriptor<monitoring.Monitoring.SubsDescriptor, - monitoring.Monitoring.KpiList> getSetKpiSubscriptionMethod() { - io.grpc.MethodDescriptor<monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.KpiList> getSetKpiSubscriptionMethod; + monitoring.Monitoring.SubsResponse> getSetKpiSubscriptionMethod() { + io.grpc.MethodDescriptor<monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.SubsResponse> getSetKpiSubscriptionMethod; if ((getSetKpiSubscriptionMethod = MonitoringServiceGrpc.getSetKpiSubscriptionMethod) == null) { synchronized (MonitoringServiceGrpc.class) { if ((getSetKpiSubscriptionMethod = MonitoringServiceGrpc.getSetKpiSubscriptionMethod) == null) { MonitoringServiceGrpc.getSetKpiSubscriptionMethod = getSetKpiSubscriptionMethod = - io.grpc.MethodDescriptor.<monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.KpiList>newBuilder() + io.grpc.MethodDescriptor.<monitoring.Monitoring.SubsDescriptor, monitoring.Monitoring.SubsResponse>newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) 
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "SetKpiSubscription")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( monitoring.Monitoring.SubsDescriptor.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( - monitoring.Monitoring.KpiList.getDefaultInstance())) + monitoring.Monitoring.SubsResponse.getDefaultInstance())) .setSchemaDescriptor(new MonitoringServiceMethodDescriptorSupplier("SetKpiSubscription")) .build(); } @@ -294,28 +294,28 @@ public final class MonitoringServiceGrpc { } private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, - monitoring.Monitoring.SubsIDList> getGetSubscriptionsMethod; + monitoring.Monitoring.SubsList> getGetSubscriptionsMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "GetSubscriptions", requestType = context.ContextOuterClass.Empty.class, - responseType = monitoring.Monitoring.SubsIDList.class, + responseType = monitoring.Monitoring.SubsList.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, - monitoring.Monitoring.SubsIDList> getGetSubscriptionsMethod() { - io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, monitoring.Monitoring.SubsIDList> getGetSubscriptionsMethod; + monitoring.Monitoring.SubsList> getGetSubscriptionsMethod() { + io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, monitoring.Monitoring.SubsList> getGetSubscriptionsMethod; if ((getGetSubscriptionsMethod = MonitoringServiceGrpc.getGetSubscriptionsMethod) == null) { synchronized (MonitoringServiceGrpc.class) { if ((getGetSubscriptionsMethod = MonitoringServiceGrpc.getGetSubscriptionsMethod) == null) { MonitoringServiceGrpc.getGetSubscriptionsMethod = getGetSubscriptionsMethod = - io.grpc.MethodDescriptor.<context.ContextOuterClass.Empty, monitoring.Monitoring.SubsIDList>newBuilder() + io.grpc.MethodDescriptor.<context.ContextOuterClass.Empty, monitoring.Monitoring.SubsList>newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetSubscriptions")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( context.ContextOuterClass.Empty.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( - monitoring.Monitoring.SubsIDList.getDefaultInstance())) + monitoring.Monitoring.SubsList.getDefaultInstance())) .setSchemaDescriptor(new MonitoringServiceMethodDescriptorSupplier("GetSubscriptions")) .build(); } @@ -387,28 +387,28 @@ public final class MonitoringServiceGrpc { } private static volatile io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, - monitoring.Monitoring.AlarmIDList> getGetAlarmsMethod; + monitoring.Monitoring.AlarmList> getGetAlarmsMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "GetAlarms", requestType = context.ContextOuterClass.Empty.class, - responseType = monitoring.Monitoring.AlarmIDList.class, + responseType = monitoring.Monitoring.AlarmList.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, - monitoring.Monitoring.AlarmIDList> getGetAlarmsMethod() { - io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, monitoring.Monitoring.AlarmIDList> getGetAlarmsMethod; + monitoring.Monitoring.AlarmList> getGetAlarmsMethod() { + 
io.grpc.MethodDescriptor<context.ContextOuterClass.Empty, monitoring.Monitoring.AlarmList> getGetAlarmsMethod; if ((getGetAlarmsMethod = MonitoringServiceGrpc.getGetAlarmsMethod) == null) { synchronized (MonitoringServiceGrpc.class) { if ((getGetAlarmsMethod = MonitoringServiceGrpc.getGetAlarmsMethod) == null) { MonitoringServiceGrpc.getGetAlarmsMethod = getGetAlarmsMethod = - io.grpc.MethodDescriptor.<context.ContextOuterClass.Empty, monitoring.Monitoring.AlarmIDList>newBuilder() + io.grpc.MethodDescriptor.<context.ContextOuterClass.Empty, monitoring.Monitoring.AlarmList>newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetAlarms")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( context.ContextOuterClass.Empty.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( - monitoring.Monitoring.AlarmIDList.getDefaultInstance())) + monitoring.Monitoring.AlarmList.getDefaultInstance())) .setSchemaDescriptor(new MonitoringServiceMethodDescriptorSupplier("GetAlarms")) .build(); } @@ -542,28 +542,28 @@ public final class MonitoringServiceGrpc { } private static volatile io.grpc.MethodDescriptor<monitoring.Monitoring.KpiId, - monitoring.Monitoring.KpiList> getGetInstantKpiMethod; + monitoring.Monitoring.Kpi> getGetInstantKpiMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "GetInstantKpi", requestType = monitoring.Monitoring.KpiId.class, - responseType = monitoring.Monitoring.KpiList.class, + responseType = monitoring.Monitoring.Kpi.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor<monitoring.Monitoring.KpiId, - monitoring.Monitoring.KpiList> getGetInstantKpiMethod() { - io.grpc.MethodDescriptor<monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiList> getGetInstantKpiMethod; + monitoring.Monitoring.Kpi> getGetInstantKpiMethod() { + io.grpc.MethodDescriptor<monitoring.Monitoring.KpiId, monitoring.Monitoring.Kpi> getGetInstantKpiMethod; if ((getGetInstantKpiMethod = MonitoringServiceGrpc.getGetInstantKpiMethod) == null) { synchronized (MonitoringServiceGrpc.class) { if ((getGetInstantKpiMethod = MonitoringServiceGrpc.getGetInstantKpiMethod) == null) { MonitoringServiceGrpc.getGetInstantKpiMethod = getGetInstantKpiMethod = - io.grpc.MethodDescriptor.<monitoring.Monitoring.KpiId, monitoring.Monitoring.KpiList>newBuilder() + io.grpc.MethodDescriptor.<monitoring.Monitoring.KpiId, monitoring.Monitoring.Kpi>newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetInstantKpi")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( monitoring.Monitoring.KpiId.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( - monitoring.Monitoring.KpiList.getDefaultInstance())) + monitoring.Monitoring.Kpi.getDefaultInstance())) .setSchemaDescriptor(new MonitoringServiceMethodDescriptorSupplier("GetInstantKpi")) .build(); } @@ -665,14 +665,14 @@ public final class MonitoringServiceGrpc { /** */ public void queryKpiData(monitoring.Monitoring.KpiQuery request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.RawKpiTable> responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getQueryKpiDataMethod(), responseObserver); } /** */ public void 
setKpiSubscription(monitoring.Monitoring.SubsDescriptor request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsResponse> responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getSetKpiSubscriptionMethod(), responseObserver); } @@ -686,7 +686,7 @@ public final class MonitoringServiceGrpc { /** */ public void getSubscriptions(context.ContextOuterClass.Empty request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsIDList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsList> responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetSubscriptionsMethod(), responseObserver); } @@ -707,7 +707,7 @@ public final class MonitoringServiceGrpc { /** */ public void getAlarms(context.ContextOuterClass.Empty request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmIDList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmList> responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetAlarmsMethod(), responseObserver); } @@ -742,7 +742,7 @@ public final class MonitoringServiceGrpc { /** */ public void getInstantKpi(monitoring.Monitoring.KpiId request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.Kpi> responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetInstantKpiMethod(), responseObserver); } @@ -795,14 +795,14 @@ public final class MonitoringServiceGrpc { io.grpc.stub.ServerCalls.asyncUnaryCall( new MethodHandlers< monitoring.Monitoring.KpiQuery, - monitoring.Monitoring.KpiList>( + monitoring.Monitoring.RawKpiTable>( this, METHODID_QUERY_KPI_DATA))) .addMethod( getSetKpiSubscriptionMethod(), io.grpc.stub.ServerCalls.asyncServerStreamingCall( new MethodHandlers< monitoring.Monitoring.SubsDescriptor, - monitoring.Monitoring.KpiList>( + monitoring.Monitoring.SubsResponse>( this, METHODID_SET_KPI_SUBSCRIPTION))) .addMethod( getGetSubsDescriptorMethod(), @@ -816,7 +816,7 @@ public final class MonitoringServiceGrpc { io.grpc.stub.ServerCalls.asyncUnaryCall( new MethodHandlers< context.ContextOuterClass.Empty, - monitoring.Monitoring.SubsIDList>( + monitoring.Monitoring.SubsList>( this, METHODID_GET_SUBSCRIPTIONS))) .addMethod( getDeleteSubscriptionMethod(), @@ -837,7 +837,7 @@ public final class MonitoringServiceGrpc { io.grpc.stub.ServerCalls.asyncUnaryCall( new MethodHandlers< context.ContextOuterClass.Empty, - monitoring.Monitoring.AlarmIDList>( + monitoring.Monitoring.AlarmList>( this, METHODID_GET_ALARMS))) .addMethod( getGetAlarmDescriptorMethod(), @@ -872,7 +872,7 @@ public final class MonitoringServiceGrpc { io.grpc.stub.ServerCalls.asyncUnaryCall( new MethodHandlers< monitoring.Monitoring.KpiId, - monitoring.Monitoring.KpiList>( + monitoring.Monitoring.Kpi>( this, METHODID_GET_INSTANT_KPI))) .build(); } @@ -943,7 +943,7 @@ public final class MonitoringServiceGrpc { /** */ public void queryKpiData(monitoring.Monitoring.KpiQuery request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.RawKpiTable> responseObserver) { io.grpc.stub.ClientCalls.asyncUnaryCall( getChannel().newCall(getQueryKpiDataMethod(), getCallOptions()), request, responseObserver); } @@ -951,7 +951,7 @@ public final class MonitoringServiceGrpc { /** */ public void setKpiSubscription(monitoring.Monitoring.SubsDescriptor 
request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsResponse> responseObserver) { io.grpc.stub.ClientCalls.asyncServerStreamingCall( getChannel().newCall(getSetKpiSubscriptionMethod(), getCallOptions()), request, responseObserver); } @@ -967,7 +967,7 @@ public final class MonitoringServiceGrpc { /** */ public void getSubscriptions(context.ContextOuterClass.Empty request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsIDList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsList> responseObserver) { io.grpc.stub.ClientCalls.asyncUnaryCall( getChannel().newCall(getGetSubscriptionsMethod(), getCallOptions()), request, responseObserver); } @@ -991,7 +991,7 @@ public final class MonitoringServiceGrpc { /** */ public void getAlarms(context.ContextOuterClass.Empty request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmIDList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmList> responseObserver) { io.grpc.stub.ClientCalls.asyncUnaryCall( getChannel().newCall(getGetAlarmsMethod(), getCallOptions()), request, responseObserver); } @@ -1031,7 +1031,7 @@ public final class MonitoringServiceGrpc { /** */ public void getInstantKpi(monitoring.Monitoring.KpiId request, - io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList> responseObserver) { + io.grpc.stub.StreamObserver<monitoring.Monitoring.Kpi> responseObserver) { io.grpc.stub.ClientCalls.asyncUnaryCall( getChannel().newCall(getGetInstantKpiMethod(), getCallOptions()), request, responseObserver); } @@ -1095,14 +1095,14 @@ public final class MonitoringServiceGrpc { /** */ - public monitoring.Monitoring.KpiList queryKpiData(monitoring.Monitoring.KpiQuery request) { + public monitoring.Monitoring.RawKpiTable queryKpiData(monitoring.Monitoring.KpiQuery request) { return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getQueryKpiDataMethod(), getCallOptions(), request); } /** */ - public java.util.Iterator<monitoring.Monitoring.KpiList> setKpiSubscription( + public java.util.Iterator<monitoring.Monitoring.SubsResponse> setKpiSubscription( monitoring.Monitoring.SubsDescriptor request) { return io.grpc.stub.ClientCalls.blockingServerStreamingCall( getChannel(), getSetKpiSubscriptionMethod(), getCallOptions(), request); @@ -1117,7 +1117,7 @@ public final class MonitoringServiceGrpc { /** */ - public monitoring.Monitoring.SubsIDList getSubscriptions(context.ContextOuterClass.Empty request) { + public monitoring.Monitoring.SubsList getSubscriptions(context.ContextOuterClass.Empty request) { return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getGetSubscriptionsMethod(), getCallOptions(), request); } @@ -1138,7 +1138,7 @@ public final class MonitoringServiceGrpc { /** */ - public monitoring.Monitoring.AlarmIDList getAlarms(context.ContextOuterClass.Empty request) { + public monitoring.Monitoring.AlarmList getAlarms(context.ContextOuterClass.Empty request) { return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getGetAlarmsMethod(), getCallOptions(), request); } @@ -1175,7 +1175,7 @@ public final class MonitoringServiceGrpc { /** */ - public monitoring.Monitoring.KpiList getInstantKpi(monitoring.Monitoring.KpiId request) { + public monitoring.Monitoring.Kpi getInstantKpi(monitoring.Monitoring.KpiId request) { return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getGetInstantKpiMethod(), getCallOptions(), request); } @@ -1245,7 +1245,7 @@ public 
final class MonitoringServiceGrpc { /** */ - public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.KpiList> queryKpiData( + public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.RawKpiTable> queryKpiData( monitoring.Monitoring.KpiQuery request) { return io.grpc.stub.ClientCalls.futureUnaryCall( getChannel().newCall(getQueryKpiDataMethod(), getCallOptions()), request); @@ -1261,7 +1261,7 @@ public final class MonitoringServiceGrpc { /** */ - public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.SubsIDList> getSubscriptions( + public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.SubsList> getSubscriptions( context.ContextOuterClass.Empty request) { return io.grpc.stub.ClientCalls.futureUnaryCall( getChannel().newCall(getGetSubscriptionsMethod(), getCallOptions()), request); @@ -1285,7 +1285,7 @@ public final class MonitoringServiceGrpc { /** */ - public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.AlarmIDList> getAlarms( + public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.AlarmList> getAlarms( context.ContextOuterClass.Empty request) { return io.grpc.stub.ClientCalls.futureUnaryCall( getChannel().newCall(getGetAlarmsMethod(), getCallOptions()), request); @@ -1309,7 +1309,7 @@ public final class MonitoringServiceGrpc { /** */ - public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.KpiList> getInstantKpi( + public com.google.common.util.concurrent.ListenableFuture<monitoring.Monitoring.Kpi> getInstantKpi( monitoring.Monitoring.KpiId request) { return io.grpc.stub.ClientCalls.futureUnaryCall( getChannel().newCall(getGetInstantKpiMethod(), getCallOptions()), request); @@ -1378,11 +1378,11 @@ public final class MonitoringServiceGrpc { break; case METHODID_QUERY_KPI_DATA: serviceImpl.queryKpiData((monitoring.Monitoring.KpiQuery) request, - (io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList>) responseObserver); + (io.grpc.stub.StreamObserver<monitoring.Monitoring.RawKpiTable>) responseObserver); break; case METHODID_SET_KPI_SUBSCRIPTION: serviceImpl.setKpiSubscription((monitoring.Monitoring.SubsDescriptor) request, - (io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList>) responseObserver); + (io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsResponse>) responseObserver); break; case METHODID_GET_SUBS_DESCRIPTOR: serviceImpl.getSubsDescriptor((monitoring.Monitoring.SubscriptionID) request, @@ -1390,7 +1390,7 @@ public final class MonitoringServiceGrpc { break; case METHODID_GET_SUBSCRIPTIONS: serviceImpl.getSubscriptions((context.ContextOuterClass.Empty) request, - (io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsIDList>) responseObserver); + (io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsList>) responseObserver); break; case METHODID_DELETE_SUBSCRIPTION: serviceImpl.deleteSubscription((monitoring.Monitoring.SubscriptionID) request, @@ -1402,7 +1402,7 @@ public final class MonitoringServiceGrpc { break; case METHODID_GET_ALARMS: serviceImpl.getAlarms((context.ContextOuterClass.Empty) request, - (io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmIDList>) responseObserver); + (io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmList>) responseObserver); break; case METHODID_GET_ALARM_DESCRIPTOR: serviceImpl.getAlarmDescriptor((monitoring.Monitoring.AlarmID) request, @@ -1422,7 +1422,7 @@ public final class MonitoringServiceGrpc { break; case METHODID_GET_INSTANT_KPI: 
serviceImpl.getInstantKpi((monitoring.Monitoring.KpiId) request, - (io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList>) responseObserver); + (io.grpc.stub.StreamObserver<monitoring.Monitoring.Kpi>) responseObserver); break; default: throw new AssertionError(); diff --git a/src/automation/target/generated-sources/grpc/monitoring/MutinyMonitoringServiceGrpc.java b/src/automation/target/generated-sources/grpc/monitoring/MutinyMonitoringServiceGrpc.java index d663b38c923a2b5401642db4e697e16be4720f05..f045ecc7ed434ba90bdfda065f18e0d839850a76 100644 --- a/src/automation/target/generated-sources/grpc/monitoring/MutinyMonitoringServiceGrpc.java +++ b/src/automation/target/generated-sources/grpc/monitoring/MutinyMonitoringServiceGrpc.java @@ -66,7 +66,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> queryKpiData(monitoring.Monitoring.KpiQuery request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.RawKpiTable> queryKpiData(monitoring.Monitoring.KpiQuery request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::queryKpiData); } @@ -76,7 +76,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsIDList> getSubscriptions(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsList> getSubscriptions(context.ContextOuterClass.Empty request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::getSubscriptions); } @@ -91,7 +91,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmIDList> getAlarms(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmList> getAlarms(context.ContextOuterClass.Empty request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::getAlarms); } @@ -106,12 +106,12 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> getInstantKpi(monitoring.Monitoring.KpiId request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.Kpi> getInstantKpi(monitoring.Monitoring.KpiId request) { return io.quarkus.grpc.runtime.ClientCalls.oneToOne(request, delegateStub::getInstantKpi); } - public io.smallrye.mutiny.Multi<monitoring.Monitoring.KpiList> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { + public io.smallrye.mutiny.Multi<monitoring.Monitoring.SubsResponse> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { return io.quarkus.grpc.runtime.ClientCalls.oneToMany(request, delegateStub::setKpiSubscription); } @@ -173,7 +173,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> queryKpiData(monitoring.Monitoring.KpiQuery request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.RawKpiTable> queryKpiData(monitoring.Monitoring.KpiQuery request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -183,7 +183,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsIDList> getSubscriptions(context.ContextOuterClass.Empty request) { + public 
io.smallrye.mutiny.Uni<monitoring.Monitoring.SubsList> getSubscriptions(context.ContextOuterClass.Empty request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -198,7 +198,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmIDList> getAlarms(context.ContextOuterClass.Empty request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.AlarmList> getAlarms(context.ContextOuterClass.Empty request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -213,12 +213,12 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim } - public io.smallrye.mutiny.Uni<monitoring.Monitoring.KpiList> getInstantKpi(monitoring.Monitoring.KpiId request) { + public io.smallrye.mutiny.Uni<monitoring.Monitoring.Kpi> getInstantKpi(monitoring.Monitoring.KpiId request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } - public io.smallrye.mutiny.Multi<monitoring.Monitoring.KpiList> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { + public io.smallrye.mutiny.Multi<monitoring.Monitoring.SubsResponse> setKpiSubscription(monitoring.Monitoring.SubsDescriptor request) { throw new io.grpc.StatusRuntimeException(io.grpc.Status.UNIMPLEMENTED); } @@ -281,14 +281,14 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim asyncUnaryCall( new MethodHandlers< monitoring.Monitoring.KpiQuery, - monitoring.Monitoring.KpiList>( + monitoring.Monitoring.RawKpiTable>( this, METHODID_QUERY_KPI_DATA, compression))) .addMethod( monitoring.MonitoringServiceGrpc.getSetKpiSubscriptionMethod(), asyncServerStreamingCall( new MethodHandlers< monitoring.Monitoring.SubsDescriptor, - monitoring.Monitoring.KpiList>( + monitoring.Monitoring.SubsResponse>( this, METHODID_SET_KPI_SUBSCRIPTION, compression))) .addMethod( monitoring.MonitoringServiceGrpc.getGetSubsDescriptorMethod(), @@ -302,7 +302,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim asyncUnaryCall( new MethodHandlers< context.ContextOuterClass.Empty, - monitoring.Monitoring.SubsIDList>( + monitoring.Monitoring.SubsList>( this, METHODID_GET_SUBSCRIPTIONS, compression))) .addMethod( monitoring.MonitoringServiceGrpc.getDeleteSubscriptionMethod(), @@ -323,7 +323,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim asyncUnaryCall( new MethodHandlers< context.ContextOuterClass.Empty, - monitoring.Monitoring.AlarmIDList>( + monitoring.Monitoring.AlarmList>( this, METHODID_GET_ALARMS, compression))) .addMethod( monitoring.MonitoringServiceGrpc.getGetAlarmDescriptorMethod(), @@ -358,7 +358,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim asyncUnaryCall( new MethodHandlers< monitoring.Monitoring.KpiId, - monitoring.Monitoring.KpiList>( + monitoring.Monitoring.Kpi>( this, METHODID_GET_INSTANT_KPI, compression))) .build(); } @@ -440,13 +440,13 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim break; case METHODID_QUERY_KPI_DATA: io.quarkus.grpc.runtime.ServerCalls.oneToOne((monitoring.Monitoring.KpiQuery) request, - (io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList>) responseObserver, + (io.grpc.stub.StreamObserver<monitoring.Monitoring.RawKpiTable>) responseObserver, compression, serviceImpl::queryKpiData); break; case METHODID_SET_KPI_SUBSCRIPTION: 
        io.quarkus.grpc.runtime.ServerCalls.oneToMany((monitoring.Monitoring.SubsDescriptor) request,
-                (io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList>) responseObserver,
+                (io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsResponse>) responseObserver,
                 compression,
                 serviceImpl::setKpiSubscription);
         break;
@@ -458,7 +458,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim
         break;
       case METHODID_GET_SUBSCRIPTIONS:
         io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Empty) request,
-                (io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsIDList>) responseObserver,
+                (io.grpc.stub.StreamObserver<monitoring.Monitoring.SubsList>) responseObserver,
                 compression,
                 serviceImpl::getSubscriptions);
         break;
@@ -476,7 +476,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim
         break;
       case METHODID_GET_ALARMS:
         io.quarkus.grpc.runtime.ServerCalls.oneToOne((context.ContextOuterClass.Empty) request,
-                (io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmIDList>) responseObserver,
+                (io.grpc.stub.StreamObserver<monitoring.Monitoring.AlarmList>) responseObserver,
                 compression,
                 serviceImpl::getAlarms);
         break;
@@ -506,7 +506,7 @@ public final class MutinyMonitoringServiceGrpc implements io.quarkus.grpc.runtim
         break;
       case METHODID_GET_INSTANT_KPI:
         io.quarkus.grpc.runtime.ServerCalls.oneToOne((monitoring.Monitoring.KpiId) request,
-                (io.grpc.stub.StreamObserver<monitoring.Monitoring.KpiList>) responseObserver,
+                (io.grpc.stub.StreamObserver<monitoring.Monitoring.Kpi>) responseObserver,
                 compression,
                 serviceImpl::getInstantKpi);
         break;
diff --git a/src/common/Constants.py b/src/common/Constants.py
index a536ef60047eb1f210f8d98d207134d377adcbed..964d904da704324d6def548103675e815743d818 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -30,8 +30,9 @@ DEFAULT_HTTP_BIND_ADDRESS = '0.0.0.0'
 DEFAULT_METRICS_PORT = 9192
 
 # Default context and topology UUIDs
-DEFAULT_CONTEXT_UUID = 'admin'
-DEFAULT_TOPOLOGY_UUID = 'admin'
+DEFAULT_CONTEXT_UUID = 'admin'
+DEFAULT_TOPOLOGY_UUID = 'admin' # contains the detailed local topology
+INTERDOMAIN_TOPOLOGY_UUID = 'inter' # contains the abstract inter-domain topology
 
 # Default service names
 class ServiceNameEnum(Enum):
@@ -50,7 +51,7 @@ class ServiceNameEnum(Enum):
     WEBUI = 'webui'
 
     # Used for test and debugging only
-    DLT_GATEWAY = 'dlt-gateway'
+    DLT_GATEWAY = 'dltgateway'
 
 # Default gRPC service ports
 DEFAULT_SERVICE_GRPC_PORTS = {
diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index 98c96d6831ca7381c70975fd60335e8cecfc6e1b..c353708995cd5d8e4a7e2fde8d9bdd03732008eb 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -16,6 +16,9 @@ from enum import Enum
 
 class DeviceTypeEnum(Enum):
 
+    # Abstractions
+    NETWORK = 'network'
+
     # Emulated device types
     EMULATED_DATACENTER = 'emu-datacenter'
     EMULATED_MICROWAVE_RADIO_SYSTEM = 'emu-microwave-radio-system'
diff --git a/src/common/database/api/context/slice/__init__.py b/src/common/database/api/context/slice/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/common/tests/LoadScenario.py b/src/common/tests/LoadScenario.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c3940e67b5772f3ba3ec0634c49f26b92bbc571
--- /dev/null
+++ b/src/common/tests/LoadScenario.py
@@ -0,0 +1,50 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from slice.client.SliceClient import SliceClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGERS = {
+    'success': LOGGER.info,
+    'danger' : LOGGER.error,
+    'error' : LOGGER.error,
+}
+
+def load_scenario_from_descriptor(
+    descriptor_file : str, context_client : ContextClient, device_client : DeviceClient,
+    service_client : ServiceClient, slice_client : SliceClient
+) -> DescriptorLoader:
+    with open(descriptor_file, 'r', encoding='UTF-8') as f:
+        descriptors = f.read()
+
+    descriptor_loader = DescriptorLoader(
+        descriptors,
+        context_client=context_client, device_client=device_client,
+        service_client=service_client, slice_client=slice_client)
+    results = descriptor_loader.process()
+
+    num_errors = 0
+    for message,level in compose_notifications(results):
+        LOGGERS.get(level)(message)
+        if level != 'success': num_errors += 1
+    if num_errors > 0:
+        MSG = 'Failed to load descriptors in file {:s}'
+        raise Exception(MSG.format(str(descriptor_file)))
+
+    return descriptor_loader
\ No newline at end of file
diff --git a/src/common/tests/MockServicerImpl_DltGateway.py b/src/common/tests/MockServicerImpl_DltGateway.py
index 2d750168238b2a041badd1974f27e57f62363d90..f106519b2695cda519e95a79e7b559dd24818108 100644
--- a/src/common/tests/MockServicerImpl_DltGateway.py
+++ b/src/common/tests/MockServicerImpl_DltGateway.py
@@ -36,6 +36,10 @@ class AlreadyExistsException(Exception):
     pass
 
 class DoesNotExistException(Exception):
     pass
 
+MSG_NOT_EXISTS = 'RecordId({:s}, {:s}, {:s}) Does Not Exist'
+MSG_ALREADY_EXISTS = 'RecordId({:s}, {:s}, {:s}) Already Exists'
+MSG_OPERATION_NOT_IMPLEMENTED = 'DltRecordOperationEnum({:s}) Not Implemented'
+
 class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
     def __init__(self):
         LOGGER.info('[__init__] Creating Servicer...')
@@ -43,16 +47,12 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         self.msg_broker = MockMessageBroker()
         LOGGER.info('[__init__] Servicer Created')
 
-    def __get_record(self, record_id : DltRecordId, should_exist : bool) -> Optional[Dict]:
+    def __get_record(self, record_id : DltRecordId) -> Optional[Dict]:
         domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
         str_type = DltRecordTypeEnum.Name(record_id.type).upper().replace('DLTRECORDTYPE_', '')
         records_domain : Dict[str, Dict] = self.records.setdefault(domain_uuid, {})
         records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {})
         record : Optional[Dict] = records_type.get(record_uuid)
-        if should_exist and record is None:
-            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
-        elif not should_exist and record is not None:
-            raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
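+        # A missing record is reported as None rather than raising; callers handle the
+        # None case explicitly (e.g., GetFromDlt below returns an empty DltRecord).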
         return record
 
     def __set_record(self, record_id : DltRecordId, should_exist : bool, data_json : str) -> None:
@@ -62,10 +62,10 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {})
         record : Optional[Dict] = records_type.get(record_uuid)
         if should_exist and record is None:
-            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+            raise DoesNotExistException(MSG_NOT_EXISTS.format(domain_uuid, str_type, record_uuid))
         elif not should_exist and record is not None:
-            raise AlreadyExistsException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
-        records_type[record_uuid] = json.loads(data_json)
+            raise AlreadyExistsException(MSG_ALREADY_EXISTS.format(domain_uuid, str_type, record_uuid))
+        records_type[record_uuid] = data_json
 
     def __del_record(self, record_id : DltRecordId) -> None:
         domain_uuid, record_uuid = record_id.domain_uuid.uuid, record_id.record_uuid.uuid
@@ -74,7 +74,7 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         records_type : Dict[str, Dict] = records_domain.setdefault(str_type, {})
         record : Optional[Dict] = records_type.get(record_uuid)
         if record is None:
-            raise DoesNotExistException('RecordId({:s}, {:s}, {:s})'.format(domain_uuid, str_type, record_uuid))
+            raise DoesNotExistException(MSG_NOT_EXISTS.format(domain_uuid, str_type, record_uuid))
         records_type.discard(record_uuid)
 
     def __publish(self, operation : DltRecordOperationEnum, record_id : DltRecordId) -> None:
@@ -99,14 +99,14 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
         try:
             operation : DltRecordOperationEnum = request.operation
             if operation == DLTRECORDOPERATION_ADD:
-                self.__set_record(record_id, False, request.data_json)
+                self.__set_record(record_id, False, json.loads(request.data_json))
             elif operation == DLTRECORDOPERATION_UPDATE:
-                self.__set_record(record_id, True, request.data_json)
+                self.__set_record(record_id, True, json.loads(request.data_json))
             elif operation == DLTRECORDOPERATION_DELETE:
                 self.__del_record(record_id)
             else:
                 str_operation = DltRecordOperationEnum.Name(operation).upper().replace('DLTRECORDOPERATION_', '')
-                raise NotImplementedError('DltRecordOperationEnum({:s})'.format(str_operation))
+                raise NotImplementedError(MSG_OPERATION_NOT_IMPLEMENTED.format(str_operation))
             self.__publish(operation, record_id)
             response.status = DLTRECORDSTATUS_SUCCEEDED
         except Exception as e: # pylint: disable=broad-except
@@ -117,11 +117,12 @@ class MockServicerImpl_DltGateway(DltGatewayServiceServicer):
 
     def GetFromDlt(self, request : DltRecordId, context : grpc.ServicerContext) -> DltRecord:
         LOGGER.info('[GetFromDlt] request={:s}'.format(grpc_message_to_json_string(request)))
-        record = self.__get_record(request, True)
+        record = self.__get_record(request)
         response = DltRecord()
-        response.record_id.CopyFrom(request) # pylint: disable=no-member
-        response.operation = DLTRECORDOPERATION_UNDEFINED
-        response.data_json = json.dumps(record, sort_keys=True)
+        if record is not None:
+            response.record_id.CopyFrom(request) # pylint: disable=no-member
+            response.operation = DLTRECORDOPERATION_UNDEFINED
+            response.data_json = json.dumps(record, sort_keys=True)
         LOGGER.info('[GetFromDlt] response={:s}'.format(grpc_message_to_json_string(response)))
         return response
diff --git a/src/common/tools/context_queries/CheckType.py b/src/common/tools/context_queries/CheckType.py
new file mode 100644
index 0000000000000000000000000000000000000000..f53ad16906336182311d1d98fec428f1472bf748
--- /dev/null
+++ b/src/common/tools/context_queries/CheckType.py
@@ -0,0 +1,28 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union
+from common.DeviceTypes import DeviceTypeEnum
+
+def device_type_is_datacenter(device_type : Union[str, DeviceTypeEnum]) -> bool:
+    return device_type in {
+        DeviceTypeEnum.DATACENTER, DeviceTypeEnum.DATACENTER.value,
+        DeviceTypeEnum.EMULATED_DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER.value
+    }
+
+def device_type_is_network(device_type : Union[str, DeviceTypeEnum]) -> bool:
+    return device_type in {DeviceTypeEnum.NETWORK, DeviceTypeEnum.NETWORK.value}
+
+def endpoint_type_is_border(endpoint_type : str) -> bool:
+    return str(endpoint_type).endswith('/border')
diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf0d3be2b7c1890e486492ad55add19a17591353
--- /dev/null
+++ b/src/common/tools/context_queries/Context.py
@@ -0,0 +1,25 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import Context, Empty
+from common.tools.object_factory.Context import json_context
+from context.client.ContextClient import ContextClient
+
+def create_context(
+    context_client : ContextClient, context_uuid : str
+) -> None:
+    existing_context_ids = context_client.ListContextIds(Empty())
+    existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids}
+    if context_uuid in existing_context_uuids: return
+    context_client.SetContext(Context(**json_context(context_uuid)))
diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5b205d46185e12fa51a2cbd8146342abe5bed38
--- /dev/null
+++ b/src/common/tools/context_queries/Device.py
@@ -0,0 +1,59 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Set
+from common.proto.context_pb2 import ContextId, Device, Empty, Topology, TopologyId
+from common.tools.object_factory.Topology import json_topology_id
+from context.client.ContextClient import ContextClient
+
+def get_existing_device_uuids(context_client : ContextClient) -> Set[str]:
+    existing_device_ids = context_client.ListDeviceIds(Empty())
+    existing_device_uuids = {device_id.device_uuid.uuid for device_id in existing_device_ids.device_ids}
+    return existing_device_uuids
+
+def add_device_to_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str, device_uuid : str
+) -> bool:
+    topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id))
+    topology_ro = context_client.GetTopology(topology_id)
+    device_uuids = {device_id.device_uuid.uuid for device_id in topology_ro.device_ids}
+    if device_uuid in device_uuids: return False # already existed
+
+    topology_rw = Topology()
+    topology_rw.CopyFrom(topology_ro)
+    topology_rw.device_ids.add().device_uuid.uuid = device_uuid # pylint: disable=no-member
+    context_client.SetTopology(topology_rw)
+    return True
+
+def get_uuids_of_devices_in_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str
+) -> List[str]:
+    topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id))
+    topology = context_client.GetTopology(topology_id)
+    device_uuids = [device_id.device_uuid.uuid for device_id in topology.device_ids]
+    return device_uuids
+
+def get_devices_in_topology(
+    context_client : ContextClient, context_id : ContextId, topology_uuid : str
+) -> List[Device]:
+    device_uuids = get_uuids_of_devices_in_topology(context_client, context_id, topology_uuid)
+
+    all_devices = context_client.ListDevices(Empty())
+    devices_in_topology = list()
+    for device in all_devices.devices:
+        device_uuid = device.device_id.device_uuid.uuid
+        if device_uuid not in device_uuids: continue
+        devices_in_topology.append(device)
+
+    return devices_in_topology
diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a202ccd810ed50beca4bb9a7b4441305623f1ed
--- /dev/null
+++ b/src/common/tools/context_queries/InterDomain.py
@@ -0,0 +1,256 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
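+
+# Conventions (see src/common/Constants.py): DEFAULT_TOPOLOGY_UUID ('admin') contains the
+# detailed local topology, while INTERDOMAIN_TOPOLOGY_UUID ('inter') contains the abstract
+# inter-domain topology, where each domain appears as a single abstract device named after
+# its topology/context. The helpers below rely on this mapping to classify devices and
+# endpoints as local or remote.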
+ +import logging +from typing import Dict, List, Set, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice +from common.proto.pathcomp_pb2 import PathCompRequest +from common.tools.context_queries.CheckType import device_type_is_network +from common.tools.context_queries.Device import get_devices_in_topology +from common.tools.context_queries.Topology import get_topology +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from pathcomp.frontend.client.PathCompClient import PathCompClient + +LOGGER = logging.getLogger(__name__) + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) +DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, DeviceTypeEnum.EMULATED_DATACENTER} + +def get_local_device_uuids(context_client : ContextClient) -> Set[str]: + topologies = context_client.ListTopologies(ADMIN_CONTEXT_ID) + topologies = {topology.topology_id.topology_uuid.uuid : topology for topology in topologies.topologies} + LOGGER.info('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys()))) + + local_topology_uuids = set(topologies.keys()) + local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + LOGGER.info('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) + + local_device_uuids = set() + + # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are abstracted as a + # local device in inter-domain and the name of the topology is used as abstract device name + for local_topology_uuid in local_topology_uuids: + if local_topology_uuid == DEFAULT_TOPOLOGY_UUID: continue + local_device_uuids.add(local_topology_uuid) + + # add physical devices in the local topologies + for local_topology_uuid in local_topology_uuids: + topology_device_ids = topologies[local_topology_uuid].device_ids + topology_device_uuids = {device_id.device_uuid.uuid for device_id in topology_device_ids} + LOGGER.info('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format( + str(local_topology_uuid), str(topology_device_uuids))) + local_device_uuids.update(topology_device_uuids) + + LOGGER.info('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids))) + return local_device_uuids + +def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: + context_uuid = DEFAULT_CONTEXT_UUID + topology_uuid = INTERDOMAIN_TOPOLOGY_UUID + interdomain_topology = get_topology(context_client, topology_uuid, context_uuid=context_uuid) + if interdomain_topology is None: + MSG = '[get_interdomain_device_uuids] {:s}/{:s} topology not found' + LOGGER.warning(MSG.format(context_uuid, topology_uuid)) + return set() + + # add abstracted devices in the interdomain topology + interdomain_device_ids = interdomain_topology.device_ids + interdomain_device_uuids = {device_id.device_uuid.uuid for device_id in interdomain_device_ids} + LOGGER.info('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) + return interdomain_device_uuids + +def get_local_domain_devices(context_client : ContextClient) -> List[Device]: + local_device_uuids = get_local_device_uuids(context_client) + all_devices = 
context_client.ListDevices(Empty()) + local_domain_devices = list() + for device in all_devices.devices: + if not device_type_is_network(device.device_type): continue + device_uuid = device.device_id.device_uuid.uuid + if device_uuid not in local_device_uuids: continue + local_domain_devices.append(device) + return local_domain_devices + +def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: + interdomain_device_uuids = get_interdomain_device_uuids(context_client) + LOGGER.info('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) + non_interdomain_endpoint_ids = [ + endpoint_id + for endpoint_id in endpoint_ids + if endpoint_id.device_id.device_uuid.uuid not in interdomain_device_uuids + ] + str_non_interdomain_endpoint_ids = [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in non_interdomain_endpoint_ids + ] + LOGGER.info('[is_inter_domain] non_interdomain_endpoint_ids={:s}'.format(str(str_non_interdomain_endpoint_ids))) + is_inter_domain_ = len(non_interdomain_endpoint_ids) == 0 + LOGGER.info('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_))) + return is_inter_domain_ + +def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: + local_device_uuids = get_local_device_uuids(context_client) + LOGGER.info('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) + remote_endpoint_ids = [ + endpoint_id + for endpoint_id in endpoint_ids + if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids + ] + str_remote_endpoint_ids = [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in remote_endpoint_ids + ] + LOGGER.info('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) + is_multi_domain_ = len(remote_endpoint_ids) > 0 + LOGGER.info('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) + return is_multi_domain_ + +def compute_interdomain_path( + pathcomp_client : PathCompClient, slice_ : Slice +) -> List[Tuple[str, List[EndPointId]]]: + context_uuid = slice_.slice_id.context_id.context_uuid.uuid + slice_uuid = slice_.slice_id.slice_uuid.uuid + + pathcomp_req = PathCompRequest() + pathcomp_req.shortest_path.Clear() # pylint: disable=no-member + pathcomp_req_svc = pathcomp_req.services.add() # pylint: disable=no-member + pathcomp_req_svc.service_id.context_id.context_uuid.uuid = context_uuid + pathcomp_req_svc.service_id.service_uuid.uuid = slice_uuid + pathcomp_req_svc.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + + for endpoint_id in slice_.slice_endpoint_ids: + service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add() + service_endpoint_id.CopyFrom(endpoint_id) + + constraint_bw = pathcomp_req_svc.service_constraints.add() + constraint_bw.custom.constraint_type = 'bandwidth[gbps]' + constraint_bw.custom.constraint_value = '10.0' + + constraint_lat = pathcomp_req_svc.service_constraints.add() + constraint_lat.custom.constraint_type = 'latency[ms]' + constraint_lat.custom.constraint_value = '100.0' + + LOGGER.info('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) + pathcomp_rep = pathcomp_client.Compute(pathcomp_req) + LOGGER.info('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) + + service = next(iter([ + service + for service in pathcomp_rep.services + if service.service_id == pathcomp_req_svc.service_id + ]), None) + if service is None: + 
str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) + raise Exception('Service({:s}) not found'.format(str_service_id)) + + connection = next(iter([ + connection + for connection in pathcomp_rep.connections + if connection.service_id == pathcomp_req_svc.service_id + ]), None) + if connection is None: + str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) + raise Exception('Connection for Service({:s}) not found'.format(str_service_id)) + + domain_list : List[str] = list() + domain_to_endpoint_ids : Dict[str, List[EndPointId]] = dict() + for endpoint_id in connection.path_hops_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + #endpoint_uuid = endpoint_id.endpoint_uuid.uuid + if device_uuid not in domain_to_endpoint_ids: domain_list.append(device_uuid) + domain_to_endpoint_ids.setdefault(device_uuid, []).append(endpoint_id) + + return [ + (domain_uuid, domain_to_endpoint_ids.get(domain_uuid)) + for domain_uuid in domain_list + ] + +def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: + devices_to_domains : Dict[str, str] = dict() + contexts = context_client.ListContexts(Empty()) + for context in contexts.contexts: + context_id = context.context_id + context_uuid = context_id.context_uuid.uuid + topologies = context_client.ListTopologies(context_id) + if context_uuid == DEFAULT_CONTEXT_UUID: + for topology in topologies.topologies: + topology_id = topology.topology_id + topology_uuid = topology_id.topology_uuid.uuid + if topology_uuid in {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID}: continue + + # add topology names except DEFAULT_TOPOLOGY_UUID and INTERDOMAIN_TOPOLOGY_UUID; they are + # abstracted as a local device in inter-domain and the name of the topology is used as + # abstract device name + devices_to_domains[topology_uuid] = topology_uuid + + # add physical devices in the local topology + for device_id in topology.device_ids: + device_uuid = device_id.device_uuid.uuid + devices_to_domains[device_uuid] = topology_uuid + else: + # for each topology in a remote context + for topology in topologies.topologies: + topology_id = topology.topology_id + topology_uuid = topology_id.topology_uuid.uuid + + # if topology is not interdomain + if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: continue + + # add devices to the remote domain list + for device_id in topology.device_ids: + device_uuid = device_id.device_uuid.uuid + devices_to_domains[device_uuid] = context_uuid + + return devices_to_domains + +def compute_traversed_domains( + context_client : ContextClient, interdomain_path : List[Tuple[str, List[EndPointId]]] +) -> List[Tuple[str, bool, List[EndPointId]]]: + + local_device_uuids = get_local_device_uuids(context_client) + LOGGER.info('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) + + interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + interdomain_devices = { + device.device_id.device_uuid.uuid : device + for device in interdomain_devices + } + + devices_to_domains = get_device_to_domain_map(context_client) + LOGGER.info('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) + + traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list() + domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict() + for device_uuid, endpoint_ids in interdomain_path: + domain_uuid = devices_to_domains.get(device_uuid, '---') + domain = 
domains_dict.get(domain_uuid) + if domain is None: + is_local_domain = domain_uuid in local_device_uuids + domain = (domain_uuid, is_local_domain, []) + traversed_domains.append(domain) + domains_dict[domain_uuid] = domain + domain[2].extend(endpoint_ids) + + str_traversed_domains = [ + (domain_uuid, is_local_domain, [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in endpoint_ids + ]) + for domain_uuid,is_local_domain,endpoint_ids in traversed_domains + ] + LOGGER.info('[compute_traversed_domains] traversed_domains={:s}'.format(str(str_traversed_domains))) + return traversed_domains diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py new file mode 100644 index 0000000000000000000000000000000000000000..abc5fa91af8d24c8a3cdf18fda0e7680da9143a7 --- /dev/null +++ b/src/common/tools/context_queries/Link.py @@ -0,0 +1,59 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Set +from common.proto.context_pb2 import ContextId, Empty, Link, Topology, TopologyId +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient + +def get_existing_link_uuids(context_client : ContextClient) -> Set[str]: + existing_link_ids = context_client.ListLinkIds(Empty()) + existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids} + return existing_link_uuids + +def add_link_to_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str, link_uuid : str +) -> bool: + topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id)) + topology_ro = context_client.GetTopology(topology_id) + link_uuids = {link_id.link_uuid.uuid for link_id in topology_ro.link_ids} + if link_uuid in link_uuids: return False # already existed + + topology_rw = Topology() + topology_rw.CopyFrom(topology_ro) + topology_rw.link_ids.add().link_uuid.uuid = link_uuid # pylint: disable=no-member + context_client.SetTopology(topology_rw) + return True + +def get_uuids_of_links_in_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str +) -> List[str]: + topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=context_id)) + topology = context_client.GetTopology(topology_id) + link_uuids = [link_id.link_uuid.uuid for link_id in topology.link_ids] + return link_uuids + +def get_links_in_topology( + context_client : ContextClient, context_id : ContextId, topology_uuid : str +) -> List[Link]: + link_uuids = get_uuids_of_links_in_topology(context_client, context_id, topology_uuid) + + all_links = context_client.ListLinks(Empty()) + links_in_topology = list() + for link in all_links.links: + link_uuid = link.link_id.link_uuid.uuid + if link_uuid not in link_uuids: continue + links_in_topology.append(link) + + return links_in_topology diff --git a/src/common/tools/context_queries/Service.py
b/src/common/tools/context_queries/Service.py new file mode 100644 index 0000000000000000000000000000000000000000..15b201e731760068457683d9e30f79ab12d231d7 --- /dev/null +++ b/src/common/tools/context_queries/Service.py @@ -0,0 +1,39 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from typing import Optional +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import Service, ServiceId +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def get_service( + context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + rw_copy : bool = False + ) -> Optional[Service]: + try: + # pylint: disable=no-member + service_id = ServiceId() + service_id.context_id.context_uuid.uuid = context_uuid + service_id.service_uuid.uuid = service_uuid + ro_service = context_client.GetService(service_id) + if not rw_copy: return ro_service + rw_service = Service() + rw_service.CopyFrom(ro_service) + return rw_service + except grpc.RpcError: + #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid))) + return None diff --git a/src/common/tools/context_queries/Slice.py b/src/common/tools/context_queries/Slice.py new file mode 100644 index 0000000000000000000000000000000000000000..9f884aa94990c28ad786b3243aed948ddc7f9f34 --- /dev/null +++ b/src/common/tools/context_queries/Slice.py @@ -0,0 +1,39 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
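+
+# Usage sketch, assuming a connected ContextClient; 'my-slice-uuid' is an illustrative value:
+#     context_client = ContextClient()
+#     context_client.connect()
+#     ro_slice = get_slice(context_client, 'my-slice-uuid')                 # read-only message
+#     rw_slice = get_slice(context_client, 'my-slice-uuid', rw_copy=True)   # mutable copy
+#     if rw_slice is None: ...   # slice not found, or the gRPC request failed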
+ +import grpc, logging +from typing import Optional +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import Slice, SliceId +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def get_slice( + context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + rw_copy : bool = False + ) -> Optional[Slice]: + try: + # pylint: disable=no-member + slice_id = SliceId() + slice_id.context_id.context_uuid.uuid = context_uuid + slice_id.slice_uuid.uuid = slice_uuid + ro_slice = context_client.GetSlice(slice_id) + if not rw_copy: return ro_slice + rw_slice = Slice() + rw_slice.CopyFrom(ro_slice) + return rw_slice + except grpc.RpcError: + #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid))) + return None diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2077e965efb3e78ad9febbe54b4f0aaea5aef6 --- /dev/null +++ b/src/common/tools/context_queries/Topology.py @@ -0,0 +1,63 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
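+
+# Usage sketch, assuming a connected ContextClient; 'admin' and 'my-topology' are illustrative identifiers:
+#     context_client = ContextClient()
+#     context_client.connect()
+#     create_topology(context_client, 'admin', 'my-topology')   # no-op if it already exists
+#     topology = get_topology(context_client, 'my-topology', context_uuid='admin')
+#     if topology is None: ...   # topology not found, or the gRPC request failed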
+ +import grpc, logging +from typing import List, Optional +from common.Constants import DEFAULT_CONTEXT_UUID +from common.proto.context_pb2 import ContextId, Topology, TopologyId +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def create_topology( + context_client : ContextClient, context_uuid : str, topology_uuid : str +) -> None: + context_id = ContextId(**json_context_id(context_uuid)) + existing_topology_ids = context_client.ListTopologyIds(context_id) + existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids} + if topology_uuid in existing_topology_uuids: return + context_client.SetTopology(Topology(**json_topology(topology_uuid, context_id=context_id))) + +def create_missing_topologies( + context_client : ContextClient, context_id : ContextId, topology_uuids : List[str] +) -> None: + # Find existing topologies within own context + existing_topology_ids = context_client.ListTopologyIds(context_id) + existing_topology_uuids = {topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids} + + # Create topologies within provided context + for topology_uuid in topology_uuids: + if topology_uuid in existing_topology_uuids: continue + grpc_topology = Topology(**json_topology(topology_uuid, context_id=context_id)) + context_client.SetTopology(grpc_topology) + +def get_topology( + context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID, + rw_copy : bool = False + ) -> Optional[Topology]: + try: + # pylint: disable=no-member + topology_id = TopologyId() + topology_id.context_id.context_uuid.uuid = context_uuid + topology_id.topology_uuid.uuid = topology_uuid + ro_topology = context_client.GetTopology(topology_id) + if not rw_copy: return ro_topology + rw_topology = Topology() + rw_topology.CopyFrom(ro_topology) + return rw_topology + except grpc.RpcError: + #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid))) + return None diff --git a/src/compute/tests/mock_osm/__init__.py b/src/common/tools/context_queries/__init__.py similarity index 100% rename from src/compute/tests/mock_osm/__init__.py rename to src/common/tools/context_queries/__init__.py diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py new file mode 100644 index 0000000000000000000000000000000000000000..f14e2caf6065996ea6223449f309e03d141b5954 --- /dev/null +++ b/src/common/tools/descriptor/Loader.py @@ -0,0 +1,254 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# SDN controller descriptor loader + +# Usage example (WebUI): +# descriptors = json.loads(descriptors_data_from_client) +# descriptor_loader = DescriptorLoader(descriptors) +# results = descriptor_loader.process() +# for message,level in compose_notifications(results): +# flash(message, level) + +# Usage example (pytest): +# with open('path/to/descriptor.json', 'r', encoding='UTF-8') as f: +# descriptors = json.loads(f.read()) +# descriptor_loader = DescriptorLoader( +# descriptors, context_client=..., device_client=..., service_client=..., slice_client=...) +# results = descriptor_loader.process() +# loggers = {'success': LOGGER.info, 'danger': LOGGER.error, 'error': LOGGER.error} +# for message,level in compose_notifications(results): +# loggers.get(level)(message) + +import json +from typing import Dict, List, Optional, Tuple, Union +from common.proto.context_pb2 import Connection, Context, Device, Link, Service, Slice, Topology +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient +from .Tools import ( + format_device_custom_config_rules, format_service_custom_config_rules, format_slice_custom_config_rules, + get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices, + get_descriptors_add_topologies, split_devices_by_rules) + +ENTITY_TO_TEXT = { + # name => singular, plural + 'context' : ('Context', 'Contexts' ), + 'topology' : ('Topology', 'Topologies' ), + 'device' : ('Device', 'Devices' ), + 'link' : ('Link', 'Links' ), + 'service' : ('Service', 'Services' ), + 'slice' : ('Slice', 'Slices' ), + 'connection': ('Connection', 'Connections'), +} + +ACTION_TO_TEXT = { + # action => infinitive, past + 'add' : ('Add', 'Added'), + 'update' : ('Update', 'Updated'), + 'config' : ('Configure', 'Configured'), +} + +TypeResults = List[Tuple[str, str, int, List[str]]] # entity_name, action, num_ok, list[error] +TypeNotification = Tuple[str, str] # message, level +TypeNotificationList = List[TypeNotification] + +def compose_notifications(results : TypeResults) -> TypeNotificationList: + notifications = [] + for entity_name, action_name, num_ok, error_list in results: + entity_name_singular,entity_name_plural = ENTITY_TO_TEXT[entity_name] + action_infinitive, action_past = ACTION_TO_TEXT[action_name] + num_err = len(error_list) + for error in error_list: + notifications.append((f'Unable to {action_infinitive} {entity_name_singular} {error}', 'error')) + if num_ok : notifications.append((f'{str(num_ok)} {entity_name_plural} {action_past}', 'success')) + if num_err: notifications.append((f'{str(num_err)} {entity_name_plural} failed', 'danger')) + return notifications + +class DescriptorLoader: + def __init__( + self, descriptors : Union[str, Dict], + context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None, + service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None + ) -> None: + self.__descriptors = json.loads(descriptors) if isinstance(descriptors, str) else descriptors + self.__dummy_mode = self.__descriptors.get('dummy_mode' , False) + self.__contexts = self.__descriptors.get('contexts' , []) + self.__topologies = self.__descriptors.get('topologies' , []) + self.__devices = self.__descriptors.get('devices' , []) + self.__links = self.__descriptors.get('links' , []) + self.__services = self.__descriptors.get('services' , []) +
self.__slices = self.__descriptors.get('slices' , []) + self.__connections = self.__descriptors.get('connections', []) + + self.__contexts_add = None + self.__topologies_add = None + self.__devices_add = None + self.__devices_config = None + self.__services_add = None + self.__slices_add = None + + self.__ctx_cli = ContextClient() if context_client is None else context_client + self.__dev_cli = DeviceClient() if device_client is None else device_client + self.__svc_cli = ServiceClient() if service_client is None else service_client + self.__slc_cli = SliceClient() if slice_client is None else slice_client + + self.__results : TypeResults = list() + + @property + def contexts(self) -> List[Dict]: return self.__contexts + + @property + def num_contexts(self) -> int: return len(self.__contexts) + + @property + def topologies(self) -> Dict[str, List[Dict]]: + _topologies = {} + for topology in self.__topologies: + context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] + _topologies.setdefault(context_uuid, []).append(topology) + return _topologies + + @property + def num_topologies(self) -> Dict[str, int]: + _num_topologies = {} + for topology in self.__topologies: + context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] + _num_topologies[context_uuid] = _num_topologies.get(context_uuid, 0) + 1 + return _num_topologies + + @property + def devices(self) -> List[Dict]: return self.__devices + + @property + def num_devices(self) -> int: return len(self.__devices) + + @property + def links(self) -> List[Dict]: return self.__links + + @property + def num_links(self) -> int: return len(self.__links) + + @property + def services(self) -> Dict[str, List[Dict]]: + _services = {} + for service in self.__services: + context_uuid = service['service_id']['context_id']['context_uuid']['uuid'] + _services.setdefault(context_uuid, []).append(service) + return _services + + @property + def num_services(self) -> Dict[str, int]: + _num_services = {} + for service in self.__services: + context_uuid = service['service_id']['context_id']['context_uuid']['uuid'] + _num_services[context_uuid] = _num_services.get(context_uuid, 0) + 1 + return _num_services + + @property + def slices(self) -> Dict[str, List[Dict]]: + _slices = {} + for slice_ in self.__slices: + context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid'] + _slices.setdefault(context_uuid, []).append(slice_) + return _slices + + @property + def num_slices(self) -> Dict[str, int]: + _num_slices = {} + for slice_ in self.__slices: + context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid'] + _num_slices[context_uuid] = _num_slices.get(context_uuid, 0) + 1 + return _num_slices + + @property + def connections(self) -> List[Dict]: return self.__connections + + @property + def num_connections(self) -> int: return len(self.__connections) + + def process(self) -> TypeResults: + # Format CustomConfigRules in Devices, Services and Slices provided in JSON format + self.__devices = [format_device_custom_config_rules (device ) for device in self.__devices ] + self.__services = [format_service_custom_config_rules(service) for service in self.__services] + self.__slices = [format_slice_custom_config_rules (slice_ ) for slice_ in self.__slices ] + + # Context and Topology require to create the entity first, and add devices, links, services, + # slices, etc. in a second stage. 
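+        # For example (see .Tools): get_descriptors_add_slices() deep-copies each slice and empties its
+        # endpoint ids, constraints and config rules so the bare entity can be created first, and
+        # split_devices_by_rules() separates each device into an 'add' part and a 'config' part.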
+ self.__contexts_add = get_descriptors_add_contexts(self.__contexts) + self.__topologies_add = get_descriptors_add_topologies(self.__topologies) + + if self.__dummy_mode: + self._dummy_mode() + else: + self._normal_mode() + + return self.__results + + def _dummy_mode(self) -> None: + # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks. + self.__ctx_cli.connect() + self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add ) + self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add) + self._process_descr('device', 'add', self.__ctx_cli.SetDevice, Device, self.__devices ) + self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, self.__links ) + self._process_descr('service', 'add', self.__ctx_cli.SetService, Service, self.__services ) + self._process_descr('slice', 'add', self.__ctx_cli.SetSlice, Slice, self.__slices ) + self._process_descr('connection', 'add', self.__ctx_cli.SetConnection, Connection, self.__connections ) + self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts ) + self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies ) + #self.__ctx_cli.close() + + def _normal_mode(self) -> None: + # Normal mode: follows the automated workflows in the different components + assert len(self.__connections) == 0, 'in normal mode, connections should not be set' + + # Device, Service and Slice require to first create the entity and then configure it + self.__devices_add, self.__devices_config = split_devices_by_rules(self.__devices) + self.__services_add = get_descriptors_add_services(self.__services) + self.__slices_add = get_descriptors_add_slices(self.__slices) + + self.__ctx_cli.connect() + self.__dev_cli.connect() + self.__svc_cli.connect() + self.__slc_cli.connect() + + self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add ) + self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add) + self._process_descr('device', 'add', self.__dev_cli.AddDevice, Device, self.__devices_add ) + self._process_descr('device', 'config', self.__dev_cli.ConfigureDevice, Device, self.__devices_config) + self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, self.__links ) + self._process_descr('service', 'add', self.__svc_cli.CreateService, Service, self.__services_add ) + self._process_descr('service', 'update', self.__svc_cli.UpdateService, Service, self.__services ) + self._process_descr('slice', 'add', self.__slc_cli.CreateSlice, Slice, self.__slices_add ) + self._process_descr('slice', 'update', self.__slc_cli.UpdateSlice, Slice, self.__slices ) + self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts ) + self._process_descr('topology', 'update', self.__ctx_cli.SetTopology, Topology, self.__topologies ) + + #self.__slc_cli.close() + #self.__svc_cli.close() + #self.__dev_cli.close() + #self.__ctx_cli.close() + + def _process_descr(self, entity_name, action_name, grpc_method, grpc_class, entities) -> None: + num_ok, error_list = 0, [] + for entity in entities: + try: + grpc_method(grpc_class(**entity)) + num_ok += 1 + except Exception as e: # pylint: disable=broad-except + error_list.append(f'{str(entity)}: {str(e)}') + self.__results.append((entity_name, action_name, num_ok, error_list)) diff --git a/src/webui/service/main/DescriptorTools.py
b/src/common/tools/descriptor/Tools.py similarity index 79% rename from src/webui/service/main/DescriptorTools.py rename to src/common/tools/descriptor/Tools.py index 094be2f7d0cfd69ddb5cddc2238e8cec64c75daa..cc7fa37577ec7b490756078a90aff959658274b4 100644 --- a/src/webui/service/main/DescriptorTools.py +++ b/src/common/tools/descriptor/Tools.py @@ -41,8 +41,8 @@ def get_descriptors_add_services(services : List[Dict]) -> List[Dict]: def get_descriptors_add_slices(slices : List[Dict]) -> List[Dict]: slices_add = [] - for slice in slices: - slice_copy = copy.deepcopy(slice) + for slice_ in slices: + slice_copy = copy.deepcopy(slice_) slice_copy['slice_endpoint_ids'] = [] slice_copy['slice_constraints'] = [] slice_copy['slice_config'] = {'config_rules': []} @@ -59,6 +59,24 @@ def format_custom_config_rules(config_rules : List[Dict]) -> List[Dict]: config_rule['custom']['resource_value'] = custom_resource_value return config_rules +def format_device_custom_config_rules(device : Dict) -> Dict: + config_rules = device.get('device_config', {}).get('config_rules', []) + config_rules = format_custom_config_rules(config_rules) + device['device_config']['config_rules'] = config_rules + return device + +def format_service_custom_config_rules(service : Dict) -> Dict: + config_rules = service.get('service_config', {}).get('config_rules', []) + config_rules = format_custom_config_rules(config_rules) + service['service_config']['config_rules'] = config_rules + return service + +def format_slice_custom_config_rules(slice_ : Dict) -> Dict: + config_rules = slice_.get('slice_config', {}).get('config_rules', []) + config_rules = format_custom_config_rules(config_rules) + slice_['slice_config']['config_rules'] = config_rules + return slice_ + def split_devices_by_rules(devices : List[Dict]) -> Tuple[List[Dict], List[Dict]]: devices_add = [] devices_config = [] diff --git a/src/common/tools/descriptor/__init__.py b/src/common/tools/descriptor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/common/tools/descriptor/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ofc22/tests/BuildDescriptors.py b/src/common/tools/descriptor/old/BuildDescriptors.py similarity index 100% rename from src/tests/ofc22/tests/BuildDescriptors.py rename to src/common/tools/descriptor/old/BuildDescriptors.py diff --git a/src/common/tools/descriptor/old/LoadDescriptors.py b/src/common/tools/descriptor/old/LoadDescriptors.py new file mode 100644 index 0000000000000000000000000000000000000000..f0b19196afbcd67c1f20263791d20820489b9cf5 --- /dev/null +++ b/src/common/tools/descriptor/old/LoadDescriptors.py @@ -0,0 +1,40 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, sys +from common.Settings import get_setting +from common.proto.context_pb2 import Context, Device, Link, Topology +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def main(): + context_client = ContextClient( + get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + device_client = DeviceClient( + get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + + with open('tests/ofc22/descriptors.json', 'r', encoding='UTF-8') as f: + descriptors = json.loads(f.read()) + + for context in descriptors['contexts' ]: context_client.SetContext (Context (**context )) + for topology in descriptors['topologies']: context_client.SetTopology(Topology(**topology)) + for device in descriptors['devices' ]: device_client .AddDevice (Device (**device )) + for link in descriptors['links' ]: context_client.SetLink (Link (**link )) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/common/tools/grpc/Constraints.py b/src/common/tools/grpc/Constraints.py index a9dd4f40cbd823752b8cc09936ac48ebe32ec1a5..aa95767ab2807e4ac7ac331c47622a8ece0e88ff 100644 --- a/src/common/tools/grpc/Constraints.py +++ b/src/common/tools/grpc/Constraints.py @@ -21,7 +21,33 @@ from typing import Any, Dict, Optional, Tuple from common.proto.context_pb2 import Constraint, EndPointId from common.tools.grpc.Tools import grpc_message_to_json_string -def update_constraint_custom(constraints, constraint_type : str, fields : Dict[str, Tuple[Any, bool]]) -> Constraint: +def update_constraint_custom_scalar( + constraints, constraint_type : str, value : Any, raise_if_differs : bool = False +) -> Constraint: + + for constraint in constraints: + if constraint.WhichOneof('constraint') != 'custom': continue + if constraint.custom.constraint_type != constraint_type: continue + json_constraint_value = json.loads(constraint.custom.constraint_value) + break # found, end loop + else: + # not found, add it + constraint = constraints.add() # pylint: disable=no-member + constraint.custom.constraint_type = constraint_type + json_constraint_value = None + + if (json_constraint_value is None) or not raise_if_differs: + # missing or raise_if_differs=False, add/update it + json_constraint_value = value + elif json_constraint_value != value: + # exists, differs, and raise_if_differs=True + msg = 'Specified value({:s}) differs from existing value({:s})' + raise Exception(msg.format(str(value), str(json_constraint_value))) + + constraint.custom.constraint_value = json.dumps(json_constraint_value, sort_keys=True) + return constraint + +def update_constraint_custom_dict(constraints, constraint_type : str, fields : Dict[str, Tuple[Any, bool]]) -> Constraint: # fields: Dict[field_name : str, Tuple[field_value : Any, raise_if_differs : bool]] for constraint in constraints: @@ -45,6 +71,7 @@ def update_constraint_custom(constraints, constraint_type : str, fields : Dict[s raise Exception(msg.format(str(field_name),
str(field_value), str(json_constraint_value[field_name]))) constraint.custom.constraint_value = json.dumps(json_constraint_value, sort_keys=True) + return constraint def update_constraint_endpoint_location( constraints, endpoint_id : EndPointId, @@ -129,10 +156,18 @@ def copy_constraints(source_constraints, target_constraints): if constraint_kind == 'custom': custom = source_constraint.custom constraint_type = custom.constraint_type - constraint_value = json.loads(custom.constraint_value) - raise_if_differs = True - fields = {name:(value, raise_if_differs) for name,value in constraint_value.items()} - update_constraint_custom(target_constraints, constraint_type, fields) + try: + constraint_value = json.loads(custom.constraint_value) + except: # pylint: disable=bare-except + constraint_value = custom.constraint_value + if isinstance(constraint_value, dict): + raise_if_differs = True + fields = {name:(value, raise_if_differs) for name,value in constraint_value.items()} + update_constraint_custom_dict(target_constraints, constraint_type, fields) + else: + raise_if_differs = True + update_constraint_custom_scalar( + target_constraints, constraint_type, constraint_value, raise_if_differs=raise_if_differs) elif constraint_kind == 'endpoint_location': endpoint_id = source_constraint.endpoint_location.endpoint_id diff --git a/src/common/tools/object_factory/PolicyRule.py b/src/common/tools/object_factory/PolicyRule.py new file mode 100644 index 0000000000000000000000000000000000000000..8702f931dfffef175ce6c25de24a10de8286effc --- /dev/null +++ b/src/common/tools/object_factory/PolicyRule.py @@ -0,0 +1,48 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
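+
+# Usage sketch for json_policy_rule() below; the rule UUID and the service_id dict are illustrative values:
+#     rule = json_policy_rule(
+#         'my-policy-rule', policy_priority=10,
+#         service_id={'context_id': {'context_uuid': {'uuid': 'admin'}}, 'service_uuid': {'uuid': 'my-service'}})
+#     # -> {'service': {'policyRuleBasic': {...}, 'serviceId': {...}, 'deviceList': []}}
+#     # without service_id, a device-scoped rule is produced instead: {'device': {...}}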
+ +import logging +from typing import Dict, List, Optional +from common.proto.policy_condition_pb2 import BooleanOperator + +LOGGER = logging.getLogger(__name__) + +def json_policy_rule_id(policy_rule_uuid : str) -> Dict: + return {'uuid': {'uuid': policy_rule_uuid}} + +def json_policy_rule( + policy_rule_uuid : str, policy_priority : int = 1, + boolean_operator : BooleanOperator = BooleanOperator.POLICYRULE_CONDITION_BOOLEAN_AND, + condition_list : List[Dict] = [], action_list : List[Dict] = [], + service_id : Optional[Dict] = None, device_id_list : List[Dict] = [] +) -> Dict: + basic = { + 'policyRuleId': json_policy_rule_id(policy_rule_uuid), + 'priority': policy_priority, + 'conditionList': condition_list, + 'booleanOperator': boolean_operator, + 'actionList': action_list, + } + + result = {} + if service_id is not None: + policy_rule_type = 'service' + result[policy_rule_type] = {'policyRuleBasic': basic} + result[policy_rule_type]['serviceId'] = service_id + else: + policy_rule_type = 'device' + result[policy_rule_type] = {'policyRuleBasic': basic} + + result[policy_rule_type]['deviceList'] = device_id_list + return result diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index 62f3dcbda148f1c624265ae7d76b0c17f5d36959..be8eefe5bc032ad6a45fd54b267db6ab12e3f5b0 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -61,3 +61,13 @@ def json_service_tapi_planned( service_uuid, ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, context_id=json_context_id(context_uuid), status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) + +def json_service_p4_planned( + service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], + config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_UUID + ): + + return json_service( + service_uuid, ServiceTypeEnum.SERVICETYPE_L2NM, context_id=json_context_id(context_uuid), + status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, + config_rules=config_rules) \ No newline at end of file diff --git a/src/common/tools/object_factory/Slice.py b/src/common/tools/object_factory/Slice.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab666aa6ed379eb0b8948b1178aa13069d70bf4 --- /dev/null +++ b/src/common/tools/object_factory/Slice.py @@ -0,0 +1,48 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
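+
+# Usage sketch for the factory functions below; the endpoint dicts are illustrative:
+#     a_ep = {'device_id': {'device_uuid': {'uuid': 'R1'}}, 'endpoint_uuid': {'uuid': 'eth0'}}
+#     z_ep = {'device_id': {'device_uuid': {'uuid': 'R2'}}, 'endpoint_uuid': {'uuid': 'eth1'}}
+#     slice_uuid = get_slice_uuid(a_ep, z_ep)   # 'slc:R1/eth0==R2/eth1'
+#     slice_descr = json_slice(slice_uuid, endpoint_ids=[a_ep, z_ep],
+#                              owner=json_slice_owner('owner-uuid', 'owner-name'))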
+ +import copy +from typing import Dict, List, Optional +from common.proto.context_pb2 import SliceStatusEnum + +def get_slice_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: + return 'slc:{:s}/{:s}=={:s}/{:s}'.format( + a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], + z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) + +def json_slice_id(slice_uuid : str, context_id : Optional[Dict] = None) -> Dict: + result = {'slice_uuid': {'uuid': slice_uuid}} + if context_id is not None: result['context_id'] = copy.deepcopy(context_id) + return result + +def json_slice_owner(owner_uuid : str, owner_string : str) -> Dict: + return {'owner_uuid': {'uuid': owner_uuid}, 'owner_string': owner_string} + +def json_slice( + slice_uuid : str, context_id : Optional[Dict] = None, + status : SliceStatusEnum = SliceStatusEnum.SLICESTATUS_PLANNED, endpoint_ids : List[Dict] = [], + constraints : List[Dict] = [], config_rules : List[Dict] = [], service_ids : List[Dict] = [], + subslice_ids : List[Dict] = [], owner : Optional[Dict] = None): + + result = { + 'slice_id' : json_slice_id(slice_uuid, context_id=context_id), + 'slice_status' : {'slice_status': status}, + 'slice_endpoint_ids': copy.deepcopy(endpoint_ids), + 'slice_constraints' : copy.deepcopy(constraints), + 'slice_config' : {'config_rules': copy.deepcopy(config_rules)}, + 'slice_service_ids' : copy.deepcopy(service_ids), + 'slice_subslice_ids': copy.deepcopy(subslice_ids), + } + if owner is not None: result['slice_owner'] = owner + return result diff --git a/src/compute/Dockerfile b/src/compute/Dockerfile index bdc07584c5bd8f08fdef6f997cc18dcfd9eeb3e6..90a69c0f503724fd1098608d85ad5eca874e3f8b 100644 --- a/src/compute/Dockerfile +++ b/src/compute/Dockerfile @@ -66,6 +66,8 @@ COPY src/compute/. compute/ COPY src/context/. context/ COPY src/service/. service/ COPY src/slice/. slice/ +RUN mkdir -p /var/teraflow/tests/tools +COPY src/tests/tools/mock_osm/. 
tests/tools/mock_osm/ # Start the service ENTRYPOINT ["python", "-m", "compute.service"] diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index 7e050289f19b93dc710185c2b29b326bbfd156d2..e3d12088147a59c3fd9e0179d3a3d957483fcc22 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -17,10 +17,10 @@ from flask import request from flask.json import jsonify from flask_restful import Resource from common.proto.context_pb2 import SliceStatusEnum +from common.tools.context_queries.Slice import get_slice from context.client.ContextClient import ContextClient from slice.client.SliceClient import SliceClient from .tools.Authentication import HTTP_AUTH -from .tools.ContextMethods import get_slice from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR LOGGER = logging.getLogger(__name__) @@ -34,7 +34,7 @@ class L2VPN_Service(Resource): try: context_client = ContextClient() - target = get_slice(context_client, vpn_id) + target = get_slice(context_client, vpn_id, rw_copy=True) if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 3cc823a2aa7a06de6cb591ef6d668ba7eeef5cbd..819d8995da6ffc3a7913c8781e4021ce83665e29 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -20,9 +20,10 @@ from flask.wrappers import Response from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType from common.proto.context_pb2 import Slice +from common.tools.context_queries.Slice import get_slice from common.tools.grpc.ConfigRules import update_config_rule_custom from common.tools.grpc.Constraints import ( - update_constraint_custom, update_constraint_endpoint_location, update_constraint_endpoint_priority, + update_constraint_custom_dict, update_constraint_endpoint_location, update_constraint_endpoint_priority, update_constraint_sla_availability) from common.tools.grpc.EndPointIds import update_endpoint_ids from common.tools.grpc.Tools import grpc_message_to_json_string @@ -30,7 +31,6 @@ from context.client.ContextClient import ContextClient from slice.client.SliceClient import SliceClient from .schemas.site_network_access import SCHEMA_SITE_NETWORK_ACCESS from .tools.Authentication import HTTP_AUTH -from .tools.ContextMethods import get_slice from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR from .tools.Validator import validate_message from .Constants import ( @@ -69,7 +69,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s address_ip, address_prefix, remote_router, circuit_id ) = mapping - target = get_slice(context_client, vpn_id) + target = get_slice(context_client, vpn_id, rw_copy=True) if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) endpoint_ids = target.slice_endpoint_ids # pylint: disable=no-member @@ -99,7 +99,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s update_config_rule_custom(config_rules, endpoint_settings_key, field_updates) if 
len(diversity_constraints) > 0: - update_constraint_custom(constraints, 'diversity', diversity_constraints) + update_constraint_custom_dict(constraints, 'diversity', diversity_constraints) update_constraint_endpoint_location(constraints, endpoint_id, region=site_id) if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py deleted file mode 100644 index ac9e6fe4a5c138d00bc80fd953de2cc21d4677b5..0000000000000000000000000000000000000000 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/tools/ContextMethods.py +++ /dev/null @@ -1,39 +0,0 @@ -import grpc, logging -from typing import Optional -from common.Constants import DEFAULT_CONTEXT_UUID -from common.proto.context_pb2 import Service, ServiceId, Slice, SliceId -from context.client.ContextClient import ContextClient - -LOGGER = logging.getLogger(__name__) - -def get_service( - context_client : ContextClient, service_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID - ) -> Optional[Service]: - try: - # pylint: disable=no-member - service_id = ServiceId() - service_id.context_id.context_uuid.uuid = context_uuid - service_id.service_uuid.uuid = service_uuid - service_readonly = context_client.GetService(service_id) - service = Service() - service.CopyFrom(service_readonly) - return service - except grpc.RpcError: - #LOGGER.exception('Unable to get service({:s} / {:s})'.format(str(context_uuid), str(service_uuid))) - return None - -def get_slice( - context_client : ContextClient, slice_uuid : str, context_uuid : str = DEFAULT_CONTEXT_UUID - ) -> Optional[Slice]: - try: - # pylint: disable=no-member - slice_id = SliceId() - slice_id.context_id.context_uuid.uuid = context_uuid - slice_id.slice_uuid.uuid = slice_uuid - slice_readonly = context_client.GetSlice(slice_id) - slice_ = Slice() - slice_.CopyFrom(slice_readonly) - return slice_ - except grpc.RpcError: - #LOGGER.exception('Unable to get slice({:s} / {:s})'.format(str(context_uuid), str(slice_uuid))) - return None diff --git a/src/compute/tests/PrepareTestScenario.py b/src/compute/tests/PrepareTestScenario.py index d534a4a28280c80964096a9cb7291c498ebe6b93..06fb34f9ee7508f4bd6fa769da78c50eb78c3bb8 100644 --- a/src/compute/tests/PrepareTestScenario.py +++ b/src/compute/tests/PrepareTestScenario.py @@ -19,7 +19,7 @@ from common.Settings import ( from compute.service.rest_server.RestServer import RestServer from compute.service.rest_server.nbi_plugins.ietf_l2vpn import register_ietf_l2vpn from compute.tests.MockService_Dependencies import MockService_Dependencies -from .mock_osm.MockOSM import MockOSM +from tests.tools.mock_osm.MockOSM import MockOSM from .Constants import WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD LOCAL_HOST = '127.0.0.1' diff --git a/src/compute/tests/test_unitary.py b/src/compute/tests/test_unitary.py index 05c45c1b3554d21084a4a20cac6856b049fe7ca3..acef6d4a68cb1e89df2fa567d437412c8805b35f 100644 --- a/src/compute/tests/test_unitary.py +++ b/src/compute/tests/test_unitary.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from .mock_osm.MockOSM import MockOSM +from tests.tools.mock_osm.MockOSM import MockOSM from .Constants import SERVICE_CONNECTION_POINTS_1, SERVICE_CONNECTION_POINTS_2, SERVICE_TYPE from .PrepareTestScenario import ( # pylint: disable=unused-import # be careful, order of symbols is important here! 
diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py index da907341f799def94694817242c106a913e03327..f91f36cf5bf73669e4010c8c65d9c4cabd9c6e2e 100644 --- a/src/context/client/ContextClient.py +++ b/src/context/client/ContextClient.py @@ -28,6 +28,8 @@ from common.proto.context_pb2 import ( Slice, SliceEvent, SliceId, SliceIdList, SliceList, Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList) from common.proto.context_pb2_grpc import ContextServiceStub +from common.proto.context_policy_pb2_grpc import ContextPolicyServiceStub +from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 @@ -42,17 +44,20 @@ class ContextClient: LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) self.channel = None self.stub = None + self.policy_stub = None self.connect() LOGGER.debug('Channel created') def connect(self): self.channel = grpc.insecure_channel(self.endpoint) self.stub = ContextServiceStub(self.channel) + self.policy_stub = ContextPolicyServiceStub(self.channel) def close(self): if self.channel is not None: self.channel.close() self.channel = None self.stub = None + self.policy_stub = None @RETRY_DECORATOR def ListContextIds(self, request: Empty) -> ContextIdList: @@ -361,3 +366,38 @@ class ContextClient: response = self.stub.GetConnectionEvents(request) LOGGER.debug('GetConnectionEvents result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def ListPolicyRuleIds(self, request: Empty) -> PolicyRuleIdList: + LOGGER.debug('ListPolicyRuleIds request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.ListPolicyRuleIds(request) + LOGGER.debug('ListPolicyRuleIds result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def ListPolicyRules(self, request: Empty) -> PolicyRuleList: + LOGGER.debug('ListPolicyRules request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.ListPolicyRules(request) + LOGGER.debug('ListPolicyRules result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def GetPolicyRule(self, request: PolicyRuleId) -> PolicyRule: + LOGGER.info('GetPolicyRule request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.GetPolicyRule(request) + LOGGER.info('GetPolicyRule result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def SetPolicyRule(self, request: PolicyRule) -> PolicyRuleId: + LOGGER.debug('SetPolicyRule request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.SetPolicyRule(request) + LOGGER.debug('SetPolicyRule result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RemovePolicyRule(self, request: PolicyRuleId) -> Empty: + LOGGER.debug('RemovePolicyRule request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.policy_stub.RemovePolicyRule(request) + LOGGER.debug('RemovePolicyRule result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py index 9715098bd3cd979d78a83b4839e40613d3997d1e..f5fc3fbc735c2f62b39223b9ed20aa3730ecd11d 100644 --- a/src/context/client/EventsCollector.py +++ b/src/context/client/EventsCollector.py @@ -12,7 +12,8 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -import grpc, logging, queue, threading +from typing import Callable +import grpc, logging, queue, threading, time from common.proto.context_pb2 import Empty from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient @@ -20,6 +21,41 @@ from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +class _Collector(threading.Thread): + def __init__( + self, subscription_func : Callable, events_queue : queue.Queue, + terminate : threading.Event, log_events_received : bool = False + ) -> None: + super().__init__(daemon=False) + self._subscription_func = subscription_func + self._events_queue = events_queue + self._terminate = terminate + self._log_events_received = log_events_received + self._stream = None + + def cancel(self) -> None: + if self._stream is None: return + self._stream.cancel() + + def run(self) -> None: + while not self._terminate.is_set(): + self._stream = self._subscription_func() + try: + for event in self._stream: + if self._log_events_received: + str_event = grpc_message_to_json_string(event) + LOGGER.info('[_collect] event: {:s}'.format(str_event)) + self._events_queue.put_nowait(event) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.UNAVAILABLE: + LOGGER.info('[_collect] UNAVAILABLE... retrying...') + time.sleep(0.5) + continue + elif e.code() == grpc.StatusCode.CANCELLED: + break + else: + raise # pragma: no cover + class EventsCollector: def __init__( self, context_client : ContextClient, @@ -31,60 +67,49 @@ class EventsCollector: activate_service_collector : bool = True, activate_slice_collector : bool = True, activate_connection_collector : bool = True, - ) -> None: self._events_queue = queue.Queue() + self._terminate = threading.Event() self._log_events_received = log_events_received - self._context_stream, self._context_thread = None, None - if activate_context_collector: - self._context_stream = context_client.GetContextEvents(Empty()) - self._context_thread = self._create_collector_thread(self._context_stream) - - self._topology_stream, self._topology_thread = None, None - if activate_topology_collector: - self._topology_stream = context_client.GetTopologyEvents(Empty()) - self._topology_thread = self._create_collector_thread(self._topology_stream) - - self._device_stream, self._device_thread = None, None - if activate_device_collector: - self._device_stream = context_client.GetDeviceEvents(Empty()) - self._device_thread = self._create_collector_thread(self._device_stream) - - self._link_stream, self._link_thread = None, None - if activate_link_collector: - self._link_stream = context_client.GetLinkEvents(Empty()) - self._link_thread = self._create_collector_thread(self._link_stream) - - self._service_stream, self._service_thread = None, None - if activate_service_collector: - self._service_stream = context_client.GetServiceEvents(Empty()) - self._service_thread = self._create_collector_thread(self._service_stream) - - self._slice_stream, self._slice_thread = None, None - if activate_slice_collector: - self._slice_stream = context_client.GetSliceEvents(Empty()) - self._slice_thread = self._create_collector_thread(self._slice_stream) - - self._connection_stream, self._connection_thread = None, None - if activate_connection_collector: - self._connection_stream = context_client.GetConnectionEvents(Empty()) - self._connection_thread =
self._create_collector_thread(self._connection_stream) - - def _create_collector_thread(self, stream, as_daemon : bool = False): - return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon) - - def _collect(self, events_stream) -> None: - try: - for event in events_stream: - if self._log_events_received: - LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event))) - self._events_queue.put_nowait(event) - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member - raise # pragma: no cover + self._context_thread = _Collector( + lambda: context_client.GetContextEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_context_collector else None + + self._topology_thread = _Collector( + lambda: context_client.GetTopologyEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_topology_collector else None + + self._device_thread = _Collector( + lambda: context_client.GetDeviceEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_device_collector else None + + self._link_thread = _Collector( + lambda: context_client.GetLinkEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_link_collector else None + + self._service_thread = _Collector( + lambda: context_client.GetServiceEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_service_collector else None + + self._slice_thread = _Collector( + lambda: context_client.GetSliceEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_slice_collector else None + + self._connection_thread = _Collector( + lambda: context_client.GetConnectionEvents(Empty()), + self._events_queue, self._terminate, self._log_events_received + ) if activate_connection_collector else None def start(self): + self._terminate.clear() + if self._context_thread is not None: self._context_thread.start() if self._topology_thread is not None: self._topology_thread.start() if self._device_thread is not None: self._device_thread.start() @@ -102,25 +127,28 @@ class EventsCollector: def get_events(self, block : bool = True, timeout : float = 0.1, count : int = None): events = [] if count is None: - while True: + while not self._terminate.is_set(): event = self.get_event(block=block, timeout=timeout) if event is None: break events.append(event) else: for _ in range(count): + if self._terminate.is_set(): break event = self.get_event(block=block, timeout=timeout) if event is None: continue events.append(event) return sorted(events, key=lambda e: e.event.timestamp.timestamp) def stop(self): - if self._context_stream is not None: self._context_stream.cancel() - if self._topology_stream is not None: self._topology_stream.cancel() - if self._device_stream is not None: self._device_stream.cancel() - if self._link_stream is not None: self._link_stream.cancel() - if self._service_stream is not None: self._service_stream.cancel() - if self._slice_stream is not None: self._slice_stream.cancel() - if self._connection_stream is not None: self._connection_stream.cancel() + self._terminate.set() + + if self._context_thread is not None: self._context_thread.cancel() + if self._topology_thread is not None: self._topology_thread.cancel() + if self._device_thread is not None: self._device_thread.cancel() + if self._link_thread is not None: self._link_thread.cancel() + if 
+        if self._service_thread is not None: self._service_thread.cancel()
+        if self._slice_thread is not None: self._slice_thread.cancel()
+        if self._connection_thread is not None: self._connection_thread.cancel()
 
         if self._context_thread is not None: self._context_thread.join()
         if self._topology_thread is not None: self._topology_thread.join()
diff --git a/src/context/service/database/PolicyRuleModel.py b/src/context/service/database/PolicyRuleModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c84ea940482091a5667b2f11272748c7b444b6f
--- /dev/null
+++ b/src/context/service/database/PolicyRuleModel.py
@@ -0,0 +1,32 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+from typing import Dict
+from common.orm.fields.PrimaryKeyField import PrimaryKeyField
+from common.orm.fields.StringField import StringField
+from common.orm.model.Model import Model
+
+LOGGER = logging.getLogger(__name__)
+
+class PolicyRuleModel(Model):
+    pk = PrimaryKeyField()
+    value = StringField(required=True, allow_empty=False)
+
+    def dump_id(self) -> Dict:
+        return {'uuid': {'uuid': self.pk}}
+
+    def dump(self) -> Dict:
+        return json.loads(self.value)
diff --git a/src/context/service/database/SliceModel.py b/src/context/service/database/SliceModel.py
index bc00ada43758c9c5ffefbb88a87134aa46fbd73a..74bb60b401f656fdcfec8b0466019f87a8f1b41e 100644
--- a/src/context/service/database/SliceModel.py
+++ b/src/context/service/database/SliceModel.py
@@ -46,6 +46,8 @@ class SliceModel(Model):
     slice_constraints_fk = ForeignKeyField(ConstraintsModel)
     slice_status = EnumeratedField(ORM_SliceStatusEnum, required=True)
     slice_config_fk = ForeignKeyField(ConfigModel)
+    slice_owner_uuid = StringField(required=False, allow_empty=True)
+    slice_owner_string = StringField(required=False, allow_empty=True)
 
     def delete(self) -> None:
         # pylint: disable=import-outside-toplevel
@@ -91,7 +93,11 @@ class SliceModel(Model):
     def dump_subslice_ids(self) -> List[Dict]:
         from .RelationModels import SliceSubSliceModel # pylint: disable=import-outside-toplevel
         db_subslices = get_related_objects(self, SliceSubSliceModel, 'sub_slice_fk')
-        return [db_subslice.dump_id() for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk'))]
+        return [
+            db_subslice.dump_id()
+            for db_subslice in sorted(db_subslices, key=operator.attrgetter('pk'))
+            if db_subslice.pk != self.pk # skip self-references: a subslice of another slice would otherwise appear as a subslice of itself
+        ]
 
     def dump(   # pylint: disable=arguments-differ
             self, include_endpoint_ids=True, include_constraints=True, include_config_rules=True,
@@ -106,4 +112,11 @@ class SliceModel(Model):
         if include_config_rules: result.setdefault('slice_config', {})['config_rules'] = self.dump_config()
         if include_service_ids: result['slice_service_ids'] = self.dump_service_ids()
         if include_subslice_ids: result['slice_subslice_ids'] = self.dump_subslice_ids()
+
+        if len(self.slice_owner_uuid) > 0:
+            result.setdefault('slice_owner', {}).setdefault('owner_uuid', {})['uuid'] = self.slice_owner_uuid
+
+        if len(self.slice_owner_string) > 0:
+            result.setdefault('slice_owner', {})['owner_string'] = self.slice_owner_string
+
         return result
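For reference, PolicyRuleModel above stores each policy rule as a serialized JSON string in 'value', so dump() simply parses it back. Likewise, when both owner fields of SliceModel are set, dump() extends the dumped slice with a structure of the following shape (derived from the setdefault calls above; values illustrative):

    'slice_owner': {'owner_uuid': {'uuid': '<owner-uuid>'}, 'owner_string': '<owner-string>'}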
diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/grpc_server/ContextService.py
index 1b54ec5400c93cba3882dccb197479b75bb699af..5d4dd8bb991ed64a970f9815bb302fd33d51cf34 100644
--- a/src/context/service/grpc_server/ContextService.py
+++ b/src/context/service/grpc_server/ContextService.py
@@ -17,6 +17,7 @@ from common.Settings import get_service_port_grpc
 from common.message_broker.MessageBroker import MessageBroker
 from common.orm.Database import Database
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
+from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
 from .ContextServiceServicerImpl import ContextServiceServicerImpl
 
@@ -31,3 +32,4 @@ class ContextService(GenericGrpcService):
 
     def install_servicers(self):
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
+        add_ContextPolicyServiceServicer_to_server(self.context_servicer, self.server)
diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py
index 88f7bd8af82009f1fc45bace87776d9cbc6d6543..f8dd188198606805e42449c3d690c20d3ad45f03 100644
--- a/src/context/service/grpc_server/ContextServiceServicerImpl.py
+++ b/src/context/service/grpc_server/ContextServiceServicerImpl.py
@@ -28,13 +28,17 @@ from common.proto.context_pb2 import (
     Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
     Slice, SliceEvent, SliceId, SliceIdList, SliceList,
     Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList)
+from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
 from common.proto.context_pb2_grpc import ContextServiceServicer
+from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
 from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
 from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException
+from common.tools.grpc.Tools import grpc_message_to_json
 from context.service.database.ConfigModel import update_config
 from context.service.database.ConnectionModel import ConnectionModel, set_path
 from context.service.database.ConstraintModel import set_constraints
 from context.service.database.ContextModel import ContextModel
+from context.service.database.PolicyRuleModel import PolicyRuleModel
 from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers
 from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types
 from context.service.database.Events import notify_event
@@ -61,11 +65,12 @@ METHOD_NAMES = [
     'ListLinkIds', 'ListLinks', 'GetLink', 'SetLink', 'RemoveLink', 'GetLinkEvents',
     'ListServiceIds', 'ListServices', 'GetService', 'SetService', 'RemoveService', 'GetServiceEvents',
     'ListSliceIds', 'ListSlices', 'GetSlice', 'SetSlice', 'RemoveSlice', 'GetSliceEvents',
+    'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule',
     'UnsetService', 'UnsetSlice',
 ]
 METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
 
-class ContextServiceServicerImpl(ContextServiceServicer):
+class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer):
     def __init__(self, database : Database, messagebroker : MessageBroker):
         LOGGER.debug('Creating Servicer...')
         self.lock = threading.Lock()
@@ -606,6 +611,8 @@ class ContextServiceServicerImpl(ContextServiceServicer):
             'slice_constraints_fk': db_constraints,
             'slice_status' : grpc_to_enum__slice_status(request.slice_status.slice_status),
             'slice_config_fk' : db_running_config,
+            'slice_owner_uuid' : request.slice_owner.owner_uuid.uuid,
+            'slice_owner_string' : request.slice_owner.owner_string,
         })
         db_slice, updated = result
 
@@ -622,7 +629,7 @@ class ContextServiceServicerImpl(ContextServiceServicer):
             db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
 
-            str_slice_endpoint_key = key_to_str([slice_uuid, str_endpoint_key], separator='--')
+            str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--')
             result : Tuple[SliceEndPointModel, bool] = get_or_create_object(
                 self.database, SliceEndPointModel, str_slice_endpoint_key, {
                     'slice_fk': db_slice, 'endpoint_fk': db_endpoint})
@@ -811,3 +818,56 @@ class ContextServiceServicerImpl(ContextServiceServicer):
     def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
         for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
             yield ConnectionEvent(**json.loads(message.content))
+
+
+    # ----- Policy -----------------------------------------------------------------------------------------------------
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
+        with self.lock:
+            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
+            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
+            return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules])
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList:
+        with self.lock:
+            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
+            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
+            return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules])
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
+        with self.lock:
+            policy_rule_uuid = request.uuid.uuid
+            db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid)
+            return PolicyRule(**db_policy_rule.dump())
+
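+    # Usage sketch (assumption: a ContextClient instance named 'context_client',
+    # as exercised by the unit tests): a minimal lookup round-trip through these
+    # RPCs looks like:
+    #
+    #     policy_rule_id = PolicyRuleId(**{'uuid': {'uuid': policy_rule_uuid}})
+    #     policy_rule = context_client.GetPolicyRule(policy_rule_id)
+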
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
+        with self.lock:
+            policy_rule_type = request.WhichOneof('policy_rule')
+            policy_rule_json = grpc_message_to_json(request)
+            policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid']
+            result: Tuple[PolicyRuleModel, bool] = update_or_create_object(
+                self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)})
+            db_policy, updated = result # pylint: disable=unused-variable
+
+            #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+            dict_policy_id = db_policy.dump_id()
+            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
+            return PolicyRuleId(**dict_policy_id)
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty:
+        with self.lock:
+            policy_uuid = request.uuid.uuid
+            db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False)
+            found = db_policy.load()
+            if not found: return Empty()
+
+            dict_policy_id = db_policy.dump_id()
+            db_policy.delete()
+            #event_type = EventTypeEnum.EVENTTYPE_REMOVE
+            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
+            return Empty()
diff --git a/src/context/service/rest_server/Resources.py b/src/context/service/rest_server/Resources.py
index d1738edb20361dab70334bc026d94d37c654127a..5f03132a34004388596ce1fdfac470f029c093ea 100644
--- a/src/context/service/rest_server/Resources.py
+++ b/src/context/service/rest_server/Resources.py
@@ -17,6 +17,7 @@ from flask.json import jsonify
 from flask_restful import Resource
 from common.orm.Database import Database
 from common.proto.context_pb2 import ConnectionId, ContextId, DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId
+from common.proto.policy_pb2 import PolicyRuleId
 from common.tools.grpc.Tools import grpc_message_to_json
 from context.service.grpc_server.ContextServiceServicerImpl import ContextServiceServicerImpl
 
@@ -61,6 +62,11 @@ def grpc_topology_id(context_uuid, topology_uuid):
         'topology_uuid': {'uuid': topology_uuid}
     })
 
+def grpc_policy_rule_id(policy_rule_uuid):
+    return PolicyRuleId(**{
+        'uuid': {'uuid': policy_rule_uuid}
+    })
+
 class _Resource(Resource):
     def __init__(self, database : Database) -> None:
         super().__init__()
@@ -151,6 +157,18 @@ class Connection(_Resource):
     def get(self, connection_uuid : str):
         return format_grpc_to_json(self.servicer.GetConnection(grpc_connection_id(connection_uuid), None))
 
+class PolicyRuleIds(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.servicer.ListPolicyRuleIds(Empty(), None))
+
+class PolicyRules(_Resource):
+    def get(self):
+        return format_grpc_to_json(self.servicer.ListPolicyRules(Empty(), None))
+
+class PolicyRule(_Resource):
+    def get(self, policy_rule_uuid : str):
+        return format_grpc_to_json(self.servicer.GetPolicyRule(grpc_policy_rule_id(policy_rule_uuid), None))
+
 class DumpText(Resource):
     def __init__(self, database : Database) -> None:
         super().__init__()
@@ -219,6 +237,10 @@ RESOURCES = [
     ('api.connections', Connections, '/context/<string:context_uuid>/service/<path:service_uuid>/connections'),
     ('api.connection', Connection, '/connection/<path:connection_uuid>'),
 
+    ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'),
+    ('api.policyrules', PolicyRules, '/policyrules'),
+    ('api.policyrule', PolicyRule, '/policyrule/<string:policy_rule_uuid>'),
+
     ('api.dump.text', DumpText, '/dump/text'),
     ('api.dump.html', DumpHtml, '/dump/html'),
 ]
diff --git a/src/context/tests/Objects.py b/src/context/tests/Objects.py
index 140cbff686eaf5b430f23ee987a9335ecb04c0f5..1cf929cfa578e8bbf8f95885cc2a7bc7e7b9f3ef 100644
--- a/src/context/tests/Objects.py
+++ b/src/context/tests/Objects.py
@@ -23,6 +23,7 @@ from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id
 from common.tools.object_factory.Link import json_link, json_link_id
 from common.tools.object_factory.Service import json_service_id, json_service_l3nm_planned
 from common.tools.object_factory.Topology import json_topology, json_topology_id
+from
common.tools.object_factory.PolicyRule import json_policy_rule, json_policy_rule_id # ----- Context -------------------------------------------------------------------------------------------------------- @@ -197,3 +198,9 @@ CONNECTION_R1_R3_SVCIDS = [SERVICE_R1_R2_ID, SERVICE_R2_R3_ID] CONNECTION_R1_R3 = json_connection( CONNECTION_R1_R3_UUID, service_id=SERVICE_R1_R3_ID, path_hops_endpoint_ids=CONNECTION_R1_R3_EPIDS, sub_service_ids=CONNECTION_R1_R3_SVCIDS) + + +# ----- PolicyRule ------------------------------------------------------------------------------------------------------- +POLICY_RULE_UUID = '56380225-3e40-4f74-9162-529f8dcb96a1' +POLICY_RULE_ID = json_policy_rule_id(POLICY_RULE_UUID) +POLICY_RULE = json_policy_rule(POLICY_RULE_UUID) diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py index 3109ef13dea98d4a56d661871b1c38ee2296f890..022c0472039d526e488f8a69096fae8c0edbdb48 100644 --- a/src/context/tests/test_unitary.py +++ b/src/context/tests/test_unitary.py @@ -27,6 +27,7 @@ from common.proto.context_pb2 import ( Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId, DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId) +from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule) from common.type_checkers.Assertions import ( validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids, validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids, @@ -44,7 +45,8 @@ from .Objects import ( CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID, DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2, LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3, - SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID) + SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID, + POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -68,8 +70,8 @@ REDIS_CONFIG = { } SCENARIOS = [ - ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ), - ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), + ('all_inmemory', DatabaseBackendEnum.INMEMORY, {}, MessageBrokerBackendEnum.INMEMORY, {} ) +# ('all_redis', DatabaseBackendEnum.REDIS, REDIS_CONFIG, MessageBrokerBackendEnum.REDIS, REDIS_CONFIG), ] @pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS) @@ -1169,6 +1171,101 @@ def test_grpc_connection( assert len(db_entries) == 0 +def test_grpc_policy( + context_client_grpc : ContextClient, # pylint: disable=redefined-outer-name + context_db_mb : Tuple[Database, MessageBroker]): # pylint: disable=redefined-outer-name + context_database = context_db_mb[0] + + # ----- Clean the database ----------------------------------------------------------------------------------------- + context_database.clear_all() + + # ----- Initialize the EventsCollector 
----------------------------------------------------------------------------- + #events_collector = EventsCollector(context_client_grpc) + #events_collector.start() + + # ----- Get when the object does not exist ------------------------------------------------------------------------- + POLICY_ID = 'no-uuid' + DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}} + + with pytest.raises(grpc.RpcError) as e: + context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID)) + + assert e.value.code() == grpc.StatusCode.NOT_FOUND + assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID) + + # ----- List when the object does not exist ------------------------------------------------------------------------ + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 0 + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 0 + + # ----- Dump state of database before create the object ------------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + # ----- Create the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Check create event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=1) + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + # ----- Update the object ------------------------------------------------------------------------------------------ + response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE)) + assert response.uuid.uuid == POLICY_RULE_UUID + + # ----- Dump state of database after create/update the object ------------------------------------------------------ + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 2 + + # ----- Get when the object exists --------------------------------------------------------------------------------- + response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID + + # ----- List when the object exists -------------------------------------------------------------------------------- + response = context_client_grpc.ListPolicyRuleIds(Empty()) + assert len(response.policyRuleIdList) == 1 + assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID + + response = context_client_grpc.ListPolicyRules(Empty()) + assert len(response.policyRules) == 1 + + # ----- Remove the object ------------------------------------------------------------------------------------------ + 
context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID)) + + # ----- Check remove event ----------------------------------------------------------------------------------------- + # events = events_collector.get_events(block=True, count=2) + + # assert isinstance(events[0], PolicyEvent) + # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE + # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID + + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + # events_collector.stop() + + # ----- Dump state of database after remove the object ------------------------------------------------------------- + db_entries = context_database.dump() + LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries))) + for db_entry in db_entries: + LOGGER.info(' [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover + LOGGER.info('-----------------------------------------------------------') + assert len(db_entries) == 0 + + + # ----- Test REST API methods ------------------------------------------------------------------------------------------ def test_rest_populate_database( @@ -1224,6 +1321,22 @@ def test_rest_get_service(context_service_rest : RestServer): # pylint: disable= reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid)) validate_service(reply) +def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid)) + #validate_slice_ids(reply) + +def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) + reply = do_rest_request('/context/{:s}/slices'.format(context_uuid)) + #validate_slices(reply) + +#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name +# context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID) +# slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='') +# reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid)) +# #validate_slice(reply) + def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name reply = do_rest_request('/device_ids') validate_device_ids(reply) @@ -1267,6 +1380,19 @@ def test_rest_get_connection(context_service_rest : RestServer): # pylint: disab reply = do_rest_request('/connection/{:s}'.format(connection_uuid)) validate_connection(reply) +def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrule_ids') + #validate_policyrule_ids(reply) + +def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name + reply = do_rest_request('/policyrules') + #validate_policyrules(reply) + +#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name +# policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='') +# reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid)) +# #validate_policyrule(reply) + # ----- Test misc. 
Context internal tools ------------------------------------------------------------------------------ diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index 069c07ce40e43192b74519b2175e7e10c638cd20..b8ff795fbd9466874b07f1f752fce682ea741111 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -28,7 +28,7 @@ from .p4_common import matches_ipv4, matches_ipv6, valid_port,\ P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\ P4_VAL_DEF_VENDOR, P4_VAL_DEF_HW_VER, P4_VAL_DEF_SW_VER,\ P4_VAL_DEF_TIMEOUT -from .p4_manager import P4Manager, get_api_version, KEY_TABLE,\ +from .p4_manager import P4Manager, KEY_TABLE,\ KEY_ACTION_PROFILE, KEY_COUNTER, KEY_DIR_COUNTER, KEY_METER, KEY_DIR_METER,\ KEY_CTL_PKT_METADATA from .p4_client import WriteOperation @@ -127,8 +127,7 @@ class P4Driver(_Driver): except Exception as ex: # pylint: disable=broad-except raise Exception(ex) from ex - LOGGER.info("\tConnected via P4Runtime version %s", - get_api_version()) + LOGGER.info("\tConnected via P4Runtime") self.__started.set() return True diff --git a/src/device/service/drivers/p4/p4_manager.py b/src/device/service/drivers/p4/p4_manager.py index 65f8602ea30fa2d8cd06b09655ee4ee63d045a97..178487250ea3a5652690fb39f1631a0133aec4e3 100644 --- a/src/device/service/drivers/p4/p4_manager.py +++ b/src/device/service/drivers/p4/p4_manager.py @@ -55,7 +55,7 @@ LOGGER = logging.getLogger(__name__) CONTEXT = Context() # Global P4Runtime client -CLIENT = None +CLIENTS = {} # Constant P4 entities KEY_TABLE = "table" @@ -76,25 +76,6 @@ def get_context(): """ return CONTEXT - -def get_client(): - """ - Return P4 client. - - :return: P4Runtime client object - """ - return CLIENT - - -def get_api_version(): - """ - Get the supported P4Runtime API version. - - :return: API version - """ - return CLIENT.api_version() - - def get_table_type(table): """ Assess the type of P4 table based upon the matching scheme. @@ -136,171 +117,28 @@ def match_type_to_str(match_type): return None -def insert_table_entry_exact( - table_name, match_map, action_name, action_params, metadata, - cnt_pkt=-1, cnt_byte=-1): - """ - Insert an entry into an exact match table. 
- - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - table_entry = TableEntry(table_name)(action=action_name) - - for match_k, match_v in match_map.items(): - table_entry.match[match_k] = match_v - - for action_k, action_v in action_params.items(): - table_entry.action[action_k] = action_v - - if metadata: - table_entry.metadata = metadata - - if cnt_pkt > 0: - table_entry.counter_data.packet_count = cnt_pkt - - if cnt_byte > 0: - table_entry.counter_data.byte_count = cnt_byte - - ex_msg = "" - try: - table_entry.insert() - LOGGER.info("Inserted exact table entry: %s", table_entry) - except (P4RuntimeException, P4RuntimeWriteException) as ex: - raise P4RuntimeException from ex - - # Table entry exists, needs to be modified - if "ALREADY_EXISTS" in ex_msg: - table_entry.modify() - LOGGER.info("Updated exact table entry: %s", table_entry) - - return table_entry - - -def insert_table_entry_ternary( - table_name, match_map, action_name, action_params, metadata, - priority, cnt_pkt=-1, cnt_byte=-1): - """ - Insert an entry into a ternary match table. - - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param priority: entry priority - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - table_entry = TableEntry(table_name)(action=action_name) - - for match_k, match_v in match_map.items(): - table_entry.match[match_k] = match_v - - for action_k, action_v in action_params.items(): - table_entry.action[action_k] = action_v - - table_entry.priority = priority - - if metadata: - table_entry.metadata = metadata - - if cnt_pkt > 0: - table_entry.counter_data.packet_count = cnt_pkt - - if cnt_byte > 0: - table_entry.counter_data.byte_count = cnt_byte - - ex_msg = "" - try: - table_entry.insert() - LOGGER.info("Inserted ternary table entry: %s", table_entry) - except (P4RuntimeException, P4RuntimeWriteException) as ex: - raise P4RuntimeException from ex - - # Table entry exists, needs to be modified - if "ALREADY_EXISTS" in ex_msg: - table_entry.modify() - LOGGER.info("Updated ternary table entry: %s", table_entry) - - return table_entry - - -def insert_table_entry_range( - table_name, match_map, action_name, action_params, metadata, - priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument - """ - Insert an entry into a range match table. 
- - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param priority: entry priority - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - raise NotImplementedError( - "Range-based table insertion not implemented yet") - - -def insert_table_entry_optional( - table_name, match_map, action_name, action_params, metadata, - priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument - """ - Insert an entry into an optional match table. - - :param table_name: P4 table name - :param match_map: Map of match operations - :param action_name: Action name - :param action_params: Map of action parameters - :param metadata: table metadata - :param priority: entry priority - :param cnt_pkt: packet count - :param cnt_byte: byte count - :return: inserted entry - """ - assert match_map, "Table entry without match operations is not accepted" - assert action_name, "Table entry without action is not accepted" - - raise NotImplementedError( - "Optional-based table insertion not implemented yet") - class P4Manager: """ Class to manage the runtime entries of a P4 pipeline. """ + local_client = None + key_id = None def __init__(self, device_id: int, ip_address: str, port: int, election_id: tuple, role_name=None, ssl_options=None): - global CLIENT + global CLIENTS self.__id = device_id self.__ip_address = ip_address self.__port = int(port) self.__endpoint = f"{self.__ip_address}:{self.__port}" - CLIENT = P4RuntimeClient( + self.key_id = ip_address+str(port) + CLIENTS[self.key_id] = P4RuntimeClient( self.__id, self.__endpoint, election_id, role_name, ssl_options) self.__p4info = None + + self.local_client = CLIENTS[self.key_id] # Internal memory for whitebox management # | -> P4 entities @@ -339,27 +177,27 @@ class P4Manager: # Forwarding pipeline is only set iff both files are present if p4bin_path and p4info_path: try: - CLIENT.set_fwd_pipe_config(p4info_path, p4bin_path) + self.local_client.set_fwd_pipe_config(p4info_path, p4bin_path) except FileNotFoundError as ex: LOGGER.critical(ex) - CLIENT.tear_down() + self.local_client.tear_down() raise FileNotFoundError(ex) from ex except P4RuntimeException as ex: LOGGER.critical("Error when setting config") LOGGER.critical(ex) - CLIENT.tear_down() + self.local_client.tear_down() raise P4RuntimeException(ex) from ex except Exception as ex: # pylint: disable=broad-except LOGGER.critical("Error when setting config") - CLIENT.tear_down() + self.local_client.tear_down() raise Exception(ex) from ex try: - self.__p4info = CLIENT.get_p4info() + self.__p4info = self.local_client.get_p4info() except P4RuntimeException as ex: LOGGER.critical("Error when retrieving P4Info") LOGGER.critical(ex) - CLIENT.tear_down() + self.local_client.tear_down() raise P4RuntimeException(ex) from ex CONTEXT.set_p4info(self.__p4info) @@ -375,14 +213,15 @@ class P4Manager: :return: void """ - global CLIENT + global CLIENTS # gRPC client must already be instantiated - assert CLIENT + assert self.local_client # Trigger connection tear down with the P4Runtime server - CLIENT.tear_down() - CLIENT = None + self.local_client.tear_down() + # Remove client entry from global dictionary + CLIENTS.pop(self.key_id) self.__clear() LOGGER.info("P4Runtime manager stopped") @@ 
-723,7 +562,7 @@ class P4Manager: try: for count, table_entry in enumerate( - TableEntry(table_name)(action=action_name).read()): + TableEntry(self.local_client, table_name)(action=action_name).read()): LOGGER.debug( "Table %s - Entry %d\n%s", table_name, count, table_entry) self.table_entries[table_name].append(table_entry) @@ -856,6 +695,154 @@ class P4Manager: ) return None + def insert_table_entry_exact(self, + table_name, match_map, action_name, action_params, metadata, + cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into an exact match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + table_entry = TableEntry(self.local_client, table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + if metadata: + table_entry.metadata = metadata + + if cnt_pkt > 0: + table_entry.counter_data.packet_count = cnt_pkt + + if cnt_byte > 0: + table_entry.counter_data.byte_count = cnt_byte + + ex_msg = "" + try: + table_entry.insert() + LOGGER.info("Inserted exact table entry: %s", table_entry) + except (P4RuntimeException, P4RuntimeWriteException) as ex: + raise P4RuntimeException from ex + + # Table entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + table_entry.modify() + LOGGER.info("Updated exact table entry: %s", table_entry) + + return table_entry + + + def insert_table_entry_ternary(self, + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): + """ + Insert an entry into a ternary match table. 
+ + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + table_entry = TableEntry(self.local_client, table_name)(action=action_name) + + for match_k, match_v in match_map.items(): + table_entry.match[match_k] = match_v + + for action_k, action_v in action_params.items(): + table_entry.action[action_k] = action_v + + table_entry.priority = priority + + if metadata: + table_entry.metadata = metadata + + if cnt_pkt > 0: + table_entry.counter_data.packet_count = cnt_pkt + + if cnt_byte > 0: + table_entry.counter_data.byte_count = cnt_byte + + ex_msg = "" + try: + table_entry.insert() + LOGGER.info("Inserted ternary table entry: %s", table_entry) + except (P4RuntimeException, P4RuntimeWriteException) as ex: + raise P4RuntimeException from ex + + # Table entry exists, needs to be modified + if "ALREADY_EXISTS" in ex_msg: + table_entry.modify() + LOGGER.info("Updated ternary table entry: %s", table_entry) + + return table_entry + + + def insert_table_entry_range(self, + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument + """ + Insert an entry into a range match table. + + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + raise NotImplementedError( + "Range-based table insertion not implemented yet") + + + def insert_table_entry_optional(self, + table_name, match_map, action_name, action_params, metadata, + priority, cnt_pkt=-1, cnt_byte=-1): # pylint: disable=unused-argument + """ + Insert an entry into an optional match table. 
+ + :param table_name: P4 table name + :param match_map: Map of match operations + :param action_name: Action name + :param action_params: Map of action parameters + :param metadata: table metadata + :param priority: entry priority + :param cnt_pkt: packet count + :param cnt_byte: byte count + :return: inserted entry + """ + assert match_map, "Table entry without match operations is not accepted" + assert action_name, "Table entry without action is not accepted" + + raise NotImplementedError( + "Optional-based table insertion not implemented yet") + def insert_table_entry(self, table_name, match_map, action_name, action_params, priority, metadata=None, cnt_pkt=-1, cnt_byte=-1): @@ -889,26 +876,26 @@ class P4Manager: # Exact match is supported if get_table_type(table) == p4info_pb2.MatchField.EXACT: - return insert_table_entry_exact( + return self.insert_table_entry_exact( table_name, match_map, action_name, action_params, metadata, cnt_pkt, cnt_byte) # Ternary and LPM matches are supported if get_table_type(table) in \ [p4info_pb2.MatchField.TERNARY, p4info_pb2.MatchField.LPM]: - return insert_table_entry_ternary( + return self.insert_table_entry_ternary( table_name, match_map, action_name, action_params, metadata, priority, cnt_pkt, cnt_byte) # TODO: Cover RANGE match # pylint: disable=W0511 if get_table_type(table) == p4info_pb2.MatchField.RANGE: - return insert_table_entry_range( + return self.insert_table_entry_range( table_name, match_map, action_name, action_params, metadata, priority, cnt_pkt, cnt_byte) # TODO: Cover OPTIONAL match # pylint: disable=W0511 if get_table_type(table) == p4info_pb2.MatchField.OPTIONAL: - return insert_table_entry_optional( + return self.insert_table_entry_optional( table_name, match_map, action_name, action_params, metadata, priority, cnt_pkt, cnt_byte) @@ -935,7 +922,7 @@ class P4Manager: LOGGER.error(msg) raise UserError(msg) - table_entry = TableEntry(table_name)(action=action_name) + table_entry = TableEntry(self.local_client, table_name)(action=action_name) for match_k, match_v in match_map.items(): table_entry.match[match_k] = match_v @@ -979,7 +966,7 @@ class P4Manager: LOGGER.error(msg) raise UserError(msg) - TableEntry(table_name).read(function=lambda x: x.delete()) + TableEntry(self.local_client, table_name).read(function=lambda x: x.delete()) LOGGER.info("Deleted all entries from table: %s", table_name) def print_table_entries_spec(self, table_name): @@ -1179,7 +1166,7 @@ class P4Manager: self.counter_entries[cnt_name] = [] try: - for count, cnt_entry in enumerate(CounterEntry(cnt_name).read()): + for count, cnt_entry in enumerate(CounterEntry(self.local_client, cnt_name).read()): LOGGER.debug( "Counter %s - Entry %d\n%s", cnt_name, count, cnt_entry) self.counter_entries[cnt_name].append(cnt_entry) @@ -1298,7 +1285,7 @@ class P4Manager: assert cnt, \ "P4 pipeline does not implement counter " + cnt_name - cnt_entry = CounterEntry(cnt_name) + cnt_entry = CounterEntry(self.local_client, cnt_name) if index: cnt_entry.index = index @@ -1325,7 +1312,7 @@ class P4Manager: assert cnt, \ "P4 pipeline does not implement counter " + cnt_name - cnt_entry = CounterEntry(cnt_name) + cnt_entry = CounterEntry(self.local_client, cnt_name) cnt_entry.clear_data() LOGGER.info("Cleared data of counter entry: %s", cnt_entry) @@ -1394,7 +1381,7 @@ class P4Manager: try: for count, d_cnt_entry in enumerate( - DirectCounterEntry(d_cnt_name).read()): + DirectCounterEntry(self.local_client, d_cnt_name).read()): LOGGER.debug( "Direct counter %s - Entry %d\n%s", 
d_cnt_name, count, d_cnt_entry) @@ -1530,7 +1517,7 @@ class P4Manager: assert match_map,\ "Direct counter entry without match operations is not accepted" - d_cnt_entry = DirectCounterEntry(d_cnt_name) + d_cnt_entry = DirectCounterEntry(self.local_client, d_cnt_name) for match_k, match_v in match_map.items(): d_cnt_entry.table_entry.match[match_k] = match_v @@ -1559,7 +1546,7 @@ class P4Manager: assert d_cnt, \ "P4 pipeline does not implement direct counter " + d_cnt_name - d_cnt_entry = DirectCounterEntry(d_cnt_name) + d_cnt_entry = DirectCounterEntry(self.local_client, d_cnt_name) d_cnt_entry.clear_data() LOGGER.info("Cleared direct counter entry: %s", d_cnt_entry) @@ -1627,7 +1614,7 @@ class P4Manager: self.meter_entries[meter_name] = [] try: - for count, meter_entry in enumerate(MeterEntry(meter_name).read()): + for count, meter_entry in enumerate(MeterEntry(self.local_client, meter_name).read()): LOGGER.debug( "Meter %s - Entry %d\n%s", meter_name, count, meter_entry) self.meter_entries[meter_name].append(meter_entry) @@ -1756,7 +1743,7 @@ class P4Manager: assert meter, \ "P4 pipeline does not implement meter " + meter_name - meter_entry = MeterEntry(meter_name) + meter_entry = MeterEntry(self.local_client, meter_name) if index: meter_entry.index = index @@ -1789,7 +1776,7 @@ class P4Manager: assert meter, \ "P4 pipeline does not implement meter " + meter_name - meter_entry = MeterEntry(meter_name) + meter_entry = MeterEntry(self.local_client, meter_name) meter_entry.clear_config() LOGGER.info("Cleared meter entry: %s", meter_entry) @@ -1858,7 +1845,7 @@ class P4Manager: try: for count, d_meter_entry in enumerate( - MeterEntry(d_meter_name).read()): + MeterEntry(self.local_client, d_meter_name).read()): LOGGER.debug( "Direct meter %s - Entry %d\n%s", d_meter_name, count, d_meter_entry) @@ -1998,7 +1985,7 @@ class P4Manager: assert match_map,\ "Direct meter entry without match operations is not accepted" - d_meter_entry = DirectMeterEntry(d_meter_name) + d_meter_entry = DirectMeterEntry(self.local_client, d_meter_name) for match_k, match_v in match_map.items(): d_meter_entry.table_entry.match[match_k] = match_v @@ -2031,7 +2018,7 @@ class P4Manager: assert d_meter, \ "P4 pipeline does not implement direct meter " + d_meter_name - d_meter_entry = DirectMeterEntry(d_meter_name) + d_meter_entry = DirectMeterEntry(self.local_client, d_meter_name) d_meter_entry.clear_config() LOGGER.info("Cleared direct meter entry: %s", d_meter_entry) @@ -2100,7 +2087,7 @@ class P4Manager: try: for count, ap_entry in enumerate( - ActionProfileMember(ap_name).read()): + ActionProfileMember(self.local_client, ap_name).read()): LOGGER.debug( "Action profile member %s - Entry %d\n%s", ap_name, count, ap_entry) @@ -2230,7 +2217,7 @@ class P4Manager: assert act_p, \ "P4 pipeline does not implement action profile " + ap_name - ap_member_entry = ActionProfileMember(ap_name)( + ap_member_entry = ActionProfileMember(self.local_client, ap_name)( member_id=member_id, action=action_name) for action_k, action_v in action_params.items(): @@ -2267,7 +2254,7 @@ class P4Manager: assert act_p, \ "P4 pipeline does not implement action profile " + ap_name - ap_member_entry = ActionProfileMember(ap_name)( + ap_member_entry = ActionProfileMember(self.local_client, ap_name)( member_id=member_id, action=action_name) ap_member_entry.delete() LOGGER.info("Deleted action profile member entry: %s", ap_member_entry) @@ -2364,7 +2351,7 @@ class P4Manager: try: for count, ap_entry in enumerate( - ActionProfileGroup(ap_name).read()): + 
ActionProfileGroup(self.local_client, ap_name).read()): LOGGER.debug("Action profile group %s - Entry %d\n%s", ap_name, count, ap_entry) self.action_profile_groups[ap_name].append(ap_entry) @@ -2483,7 +2470,7 @@ class P4Manager: assert ap, \ "P4 pipeline does not implement action profile " + ap_name - ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id) if members: for m in members: @@ -2519,7 +2506,7 @@ class P4Manager: assert ap, \ "P4 pipeline does not implement action profile " + ap_name - ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id) ap_group_entry.delete() LOGGER.info("Deleted action profile group entry: %s", ap_group_entry) @@ -2537,7 +2524,7 @@ class P4Manager: assert ap, \ "P4 pipeline does not implement action profile " + ap_name - ap_group_entry = ActionProfileGroup(ap_name)(group_id=group_id) + ap_group_entry = ActionProfileGroup(self.local_client, ap_name)(group_id=group_id) ap_group_entry.clear() LOGGER.info("Cleared action profile group entry: %s", ap_group_entry) @@ -2631,7 +2618,7 @@ class P4Manager: self.multicast_groups[group_id] = None try: - mcast_group = MulticastGroupEntry(group_id).read() + mcast_group = MulticastGroupEntry(self.local_client, group_id).read() LOGGER.debug("Multicast group %d\n%s", group_id, mcast_group) self.multicast_groups[group_id] = mcast_group return self.multicast_groups[group_id] @@ -2724,7 +2711,7 @@ class P4Manager: assert ports, \ "No multicast group ports are provided" - mcast_group = MulticastGroupEntry(group_id) + mcast_group = MulticastGroupEntry(self.local_client, group_id) for p in ports: mcast_group.add(p, 1) @@ -2756,7 +2743,7 @@ class P4Manager: assert group_id > 0, \ "Multicast group " + group_id + " must be > 0" - mcast_group = MulticastGroupEntry(group_id) + mcast_group = MulticastGroupEntry(self.local_client, group_id) mcast_group.delete() if group_id in self.multicast_groups: @@ -2772,7 +2759,7 @@ class P4Manager: :return: void """ - for mcast_group in MulticastGroupEntry().read(): + for mcast_group in MulticastGroupEntry(self.local_client).read(): gid = mcast_group.group_id mcast_group.delete() del self.multicast_groups[gid] @@ -2828,7 +2815,7 @@ class P4Manager: self.clone_session_entries[session_id] = None try: - session = CloneSessionEntry(session_id).read() + session = CloneSessionEntry(self.local_client, session_id).read() LOGGER.debug("Clone session %d\n%s", session_id, session) self.clone_session_entries[session_id] = session return self.clone_session_entries[session_id] @@ -2923,7 +2910,7 @@ class P4Manager: assert ports, \ "No clone session ports are provided" - session = CloneSessionEntry(session_id) + session = CloneSessionEntry(self.local_client, session_id) for p in ports: session.add(p, 1) @@ -2955,7 +2942,7 @@ class P4Manager: assert session_id > 0, \ "Clone session " + session_id + " must be > 0" - session = CloneSessionEntry(session_id) + session = CloneSessionEntry(self.local_client, session_id) session.delete() if session_id in self.clone_session_entries: @@ -2971,7 +2958,7 @@ class P4Manager: :return: void """ - for e in CloneSessionEntry().read(): + for e in CloneSessionEntry(self.local_client).read(): sid = e.session_id e.delete() del self.clone_session_entries[sid] @@ -3052,7 +3039,7 @@ class P4Manager: "No controller packet metadata in the pipeline\n") return None - packet_in = PacketOut() + packet_in = 
PacketIn(self.local_client) packet_in.payload = payload if metadata: for name, value in metadata.items(): @@ -3090,7 +3077,7 @@ class P4Manager: _t = Thread(target=_sniff_packet, args=(captured_packet,)) _t.start() # P4Runtime client sends the packet to the switch - CLIENT.stream_in_q["packet"].put(packet_in) + self.local_client.stream_in_q["packet"].put(packet_in) _t.join() LOGGER.info("Packet-in sent: %s", packet_in) @@ -3111,7 +3098,7 @@ class P4Manager: "No controller packet metadata in the pipeline\n") return None - packet_out = PacketOut() + packet_out = PacketOut(self.local_client) packet_out.payload = payload if metadata: for name, value in metadata.items(): @@ -3654,12 +3641,14 @@ class _EntityBase: """ Basic entity. """ + local_client = None - def __init__(self, entity_type, p4runtime_cls, modify_only=False): + def __init__(self, p4_client, entity_type, p4runtime_cls, modify_only=False): self._init = False self._entity_type = entity_type self._entry = p4runtime_cls() self._modify_only = modify_only + self.local_client = p4_client def __dir__(self): d = ["msg", "read"] @@ -3696,7 +3685,7 @@ class _EntityBase: update = p4runtime_pb2.Update() update.type = type_ getattr(update.entity, self._entity_type.name).CopyFrom(self._entry) - CLIENT.write_update(update) + self.local_client.write_update(update) def insert(self): """ @@ -3747,7 +3736,7 @@ class _EntityBase: entity = p4runtime_pb2.Entity() getattr(entity, self._entity_type.name).CopyFrom(self._entry) - iterator = CLIENT.read_one(entity) + iterator = self.local_client.read_one(entity) # Cannot use a (simpler) generator here as we need to # decorate __next__ with @parse_p4runtime_error. @@ -3794,9 +3783,9 @@ class _P4EntityBase(_EntityBase): Basic P4 entity. """ - def __init__(self, p4_type, entity_type, p4runtime_cls, name=None, + def __init__(self, p4_client, p4_type, entity_type, p4runtime_cls, name=None, modify_only=False): - super().__init__(entity_type, p4runtime_cls, modify_only) + super().__init__(p4_client, entity_type, p4runtime_cls, modify_only) self._p4_type = p4_type if name is None: raise UserError( @@ -3825,8 +3814,8 @@ class ActionProfileMember(_P4EntityBase): P4 action profile member. """ - def __init__(self, action_profile_name=None): - super().__init__( + def __init__(self, p4_client, action_profile_name=None): + super().__init__( p4_client, P4Type.action_profile, P4RuntimeEntity.action_profile_member, p4runtime_pb2.ActionProfileMember, action_profile_name) self.member_id = 0 @@ -3991,8 +3980,8 @@ class ActionProfileGroup(_P4EntityBase): P4 action profile group. """ - def __init__(self, action_profile_name=None): - super().__init__( + def __init__(self, p4_client, action_profile_name=None): + super().__init__( p4_client, P4Type.action_profile, P4RuntimeEntity.action_profile_group, p4runtime_pb2.ActionProfileGroup, action_profile_name) self.group_id = 0 @@ -4554,8 +4543,8 @@ class TableEntry(_P4EntityBase): "oneshot": cls._ActionSpecType.ONESHOT, }.get(name, None) - def __init__(self, table_name=None): - super().__init__( + def __init__(self, p4_client, table_name=None): + super().__init__(p4_client, P4Type.table, P4RuntimeEntity.table_entry, p4runtime_pb2.TableEntry, table_name) self.match = MatchKey(table_name, self._info.match_fields) @@ -4996,8 +4985,8 @@ class _CounterEntryBase(_P4EntityBase): Basic P4 counter entry. 
""" - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__(self, p4_client, *args, **kwargs): + super().__init__(p4_client, *args, **kwargs) self._counter_type = self._info.spec.unit self.packet_count = -1 self.byte_count = -1 @@ -5065,8 +5054,8 @@ class CounterEntry(_CounterEntryBase): P4 counter entry. """ - def __init__(self, counter_name=None): - super().__init__( + def __init__(self, p4_client, counter_name=None): + super().__init__( p4_client, P4Type.counter, P4RuntimeEntity.counter_entry, p4runtime_pb2.CounterEntry, counter_name, modify_only=True) @@ -5126,10 +5115,11 @@ To write to the counter, use <self>.modify class DirectCounterEntry(_CounterEntryBase): """ Direct P4 counter entry. - """ + """ + local_client = None - def __init__(self, direct_counter_name=None): - super().__init__( + def __init__(self, p4_client, direct_counter_name=None): + super().__init__( p4_client, P4Type.direct_counter, P4RuntimeEntity.direct_counter_entry, p4runtime_pb2.DirectCounterEntry, direct_counter_name, modify_only=True) @@ -5140,7 +5130,8 @@ class DirectCounterEntry(_CounterEntryBase): except KeyError as ex: raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} " f"is not a valid table id") from ex - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(p4_client, self._direct_table_name) + self.local_client = p4_client self.__doc__ = f""" An entry for direct counter '{direct_counter_name}' @@ -5167,7 +5158,7 @@ To write to the counter, use <self>.modify raise UserError("Direct counters are not index-based") if name == "table_entry": if value is None: - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(self.local_client, self._direct_table_name) return if not isinstance(value, TableEntry): raise UserError("table_entry must be an instance of TableEntry") @@ -5221,7 +5212,7 @@ class _MeterEntryBase(_P4EntityBase): Basic P4 meter entry. """ - def __init__(self, *args, **kwargs): + def __init__(self, p4_client, *args, **kwargs): super().__init__(*args, **kwargs) self._meter_type = self._info.spec.unit self.index = -1 @@ -5291,8 +5282,8 @@ class MeterEntry(_MeterEntryBase): P4 meter entry. """ - def __init__(self, meter_name=None): - super().__init__( + def __init__(self, p4_client, meter_name=None): + super().__init__(p4_client, P4Type.meter, P4RuntimeEntity.meter_entry, p4runtime_pb2.MeterEntry, meter_name, modify_only=True) @@ -5356,9 +5347,10 @@ class DirectMeterEntry(_MeterEntryBase): """ Direct P4 meter entry. 
""" + local_client = None - def __init__(self, direct_meter_name=None): - super().__init__( + def __init__(self, p4_client, direct_meter_name=None): + super().__init__(p4_client, P4Type.direct_meter, P4RuntimeEntity.direct_meter_entry, p4runtime_pb2.DirectMeterEntry, direct_meter_name, modify_only=True) @@ -5369,7 +5361,8 @@ class DirectMeterEntry(_MeterEntryBase): except KeyError as ex: raise InvalidP4InfoError(f"direct_table_id {self._direct_table_id} " f"is not a valid table id") from ex - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(p4_client, self._direct_table_name) + self.local_client = p4_client self.__doc__ = f""" An entry for direct meter '{direct_meter_name}' @@ -5399,7 +5392,7 @@ To write to the meter, use <self>.modify raise UserError("Direct meters are not index-based") if name == "table_entry": if value is None: - self._table_entry = TableEntry(self._direct_table_name) + self._table_entry = TableEntry(self.local_client, self._direct_table_name) return if not isinstance(value, TableEntry): raise UserError("table_entry must be an instance of TableEntry") @@ -5531,8 +5524,8 @@ class MulticastGroupEntry(_EntityBase): P4 multicast group entry. """ - def __init__(self, group_id=0): - super().__init__( + def __init__(self, p4_client, group_id=0): + super().__init__(p4_client, P4RuntimeEntity.packet_replication_engine_entry, p4runtime_pb2.PacketReplicationEngineEntry) self.group_id = group_id @@ -5609,8 +5602,8 @@ class CloneSessionEntry(_EntityBase): P4 clone session entry. """ - def __init__(self, session_id=0): - super().__init__( + def __init__(self, p4_client, session_id=0): + super().__init__(p4_client, P4RuntimeEntity.packet_replication_engine_entry, p4runtime_pb2.PacketReplicationEngineEntry) self.session_id = session_id @@ -5779,8 +5772,9 @@ class PacketIn(): """ P4 packet in. """ + local_client = None - def __init__(self): + def __init__(self, p4_client): ctrl_pkt_md = P4Objects(P4Type.controller_packet_metadata) self.md_info_list = {} if "packet_in" in ctrl_pkt_md: @@ -5788,10 +5782,11 @@ class PacketIn(): for md_info in self.p4_info.metadata: self.md_info_list[md_info.name] = md_info self.packet_in_queue = queue.Queue() + self.local_client = p4_client def _packet_in_recv_func(packet_in_queue): while True: - msg = CLIENT.get_stream_packet("packet", timeout=None) + msg = self.local_client.get_stream_packet("packet", timeout=None) if not msg: break packet_in_queue.put(msg) @@ -5857,8 +5852,9 @@ class PacketOut: """ P4 packet out. """ + local_client = None - def __init__(self, payload=b'', **kwargs): + def __init__(self, p4_client, payload=b'', **kwargs): self.p4_info = P4Objects(P4Type.controller_packet_metadata)[ "packet_out"] @@ -5868,6 +5864,7 @@ class PacketOut: if kwargs: for key, value in kwargs.items(): self.metadata[key] = value + self.local_client = p4_client def _update_msg(self): self._entry = p4runtime_pb2.PacketOut() @@ -5897,7 +5894,7 @@ class PacketOut: self._update_msg() msg = p4runtime_pb2.StreamMessageRequest() msg.packet.CopyFrom(self._entry) - CLIENT.stream_out_q.put(msg) + self.local_client.stream_out_q.put(msg) def str(self): """ @@ -5913,13 +5910,16 @@ class IdleTimeoutNotification(): """ P4 idle timeout notification. 
""" + + local_client = None - def __init__(self): + def __init__(self, p4_client): self.notification_queue = queue.Queue() + self.local_client = p4_client.local_client def _notification_recv_func(notification_queue): while True: - msg = CLIENT.get_stream_packet("idle_timeout_notification", + msg = self.local_client.get_stream_packet("idle_timeout_notification", timeout=None) if not msg: break diff --git a/src/dlt/connector/Config.py b/src/dlt/connector/Config.py index 9953c820575d42fa88351cc8de022d880ba96e6a..bdf9f306959e86160012541e8a72cc9aabb019c0 100644 --- a/src/dlt/connector/Config.py +++ b/src/dlt/connector/Config.py @@ -11,3 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import os + +DEFAULT_DLT_GATEWAY_HOST = '127.0.0.1' +DEFAULT_DLT_GATEWAY_PORT = '50051' + +# Find IP:port of gateway container as follows: +# - first check env vars DLT_GATEWAY_HOST & DLT_GATEWAY_PORT +# - if not set, use DEFAULT_DLT_GATEWAY_HOST & DEFAULT_DLT_GATEWAY_PORT +DLT_GATEWAY_HOST = str(os.environ.get('DLT_GATEWAY_HOST', DEFAULT_DLT_GATEWAY_HOST)) +DLT_GATEWAY_PORT = int(os.environ.get('DLT_GATEWAY_PORT', DEFAULT_DLT_GATEWAY_PORT)) diff --git a/src/dlt/connector/Dockerfile b/src/dlt/connector/Dockerfile index 51e9ec506f0c8a6c35ceac68833e3ad683ef8e63..c5d600ee0d55deb5a8bd4dca2d4f12cd092ad420 100644 --- a/src/dlt/connector/Dockerfile +++ b/src/dlt/connector/Dockerfile @@ -64,6 +64,8 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow COPY src/context/. context/ COPY src/dlt/connector/. dlt/connector +COPY src/interdomain/. interdomain/ +COPY src/slice/. slice/ # Start the service ENTRYPOINT ["python", "-m", "dlt.connector.service"] diff --git a/src/dlt/connector/client/DltConnectorClient.py b/src/dlt/connector/client/DltConnectorClient.py index f48562996b067ca81a99b6ceb7288029be7ba1c8..1ca511d0434dd72458982bf7c7d55d8bbd1859f1 100644 --- a/src/dlt/connector/client/DltConnectorClient.py +++ b/src/dlt/connector/client/DltConnectorClient.py @@ -15,7 +15,8 @@ import grpc, logging from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc -from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId +from common.proto.context_pb2 import Empty, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -46,49 +47,63 @@ class DltConnectorClient: self.stub = None @RETRY_DECORATOR - def RecordAll(self, request : Empty) -> Empty: + def RecordAll(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAll request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAll(request) LOGGER.debug('RecordAll result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordAllDevices(self, request : Empty) -> Empty: + def RecordAllDevices(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAllDevices request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAllDevices(request) LOGGER.debug('RecordAllDevices result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordDevice(self, request : DeviceId) -> 
Empty: + def RecordDevice(self, request : DltDeviceId) -> Empty: LOGGER.debug('RecordDevice request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordDevice(request) LOGGER.debug('RecordDevice result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordAllServices(self, request : Empty) -> Empty: + def RecordAllLinks(self, request : TopologyId) -> Empty: + LOGGER.debug('RecordAllLinks request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordAllLinks(request) + LOGGER.debug('RecordAllLinks result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordLink(self, request : DltLinkId) -> Empty: + LOGGER.debug('RecordLink request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RecordLink(request) + LOGGER.debug('RecordLink result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def RecordAllServices(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAllServices request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAllServices(request) LOGGER.debug('RecordAllServices result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordService(self, request : ServiceId) -> Empty: + def RecordService(self, request : DltServiceId) -> Empty: LOGGER.debug('RecordService request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordService(request) LOGGER.debug('RecordService result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordAllSlices(self, request : Empty) -> Empty: + def RecordAllSlices(self, request : TopologyId) -> Empty: LOGGER.debug('RecordAllSlices request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordAllSlices(request) LOGGER.debug('RecordAllSlices result: {:s}'.format(grpc_message_to_json_string(response))) return response @RETRY_DECORATOR - def RecordSlice(self, request : SliceId) -> Empty: + def RecordSlice(self, request : DltSliceId) -> Empty: LOGGER.debug('RecordSlice request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.RecordSlice(request) LOGGER.debug('RecordSlice result: {:s}'.format(grpc_message_to_json_string(response))) diff --git a/src/dlt/connector/client/DltEventsCollector.py b/src/dlt/connector/client/DltEventsCollector.py index 6fe2474cead37094c507a8a612181dc7f7243544..d022ac0f0144eecfcdb706665a8bde81fa54492f 100644 --- a/src/dlt/connector/client/DltEventsCollector.py +++ b/src/dlt/connector/client/DltEventsCollector.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
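The DltEventsCollector change that follows converts the collector from a helper-managed thread into a threading.Thread subclass that owns its gRPC subscription and re-subscribes while the gateway is unavailable. A minimal, self-contained sketch of that reconnect pattern, assuming only a `subscribe()` callable that returns a gRPC server-streaming call (the `StreamCollector` name is illustrative, not part of the codebase):

    import grpc, queue, threading, time

    class StreamCollector(threading.Thread):
        def __init__(self, subscribe):
            super().__init__(name='StreamCollector', daemon=True)
            self._subscribe = subscribe            # () -> iterable gRPC stream
            self._queue = queue.Queue()
            self._terminate = threading.Event()
            self._stream = None

        def run(self):
            while not self._terminate.is_set():
                try:
                    self._stream = self._subscribe()
                    for event in self._stream:     # blocks until the next event arrives
                        self._queue.put_nowait(event)
                except grpc.RpcError as e:
                    if e.code() == grpc.StatusCode.UNAVAILABLE:
                        time.sleep(0.5)            # server not reachable yet; retry
                        continue
                    if e.code() == grpc.StatusCode.CANCELLED:
                        break                      # stop() cancelled the stream
                    raise

        def stop(self):
            self._terminate.set()
            if self._stream is not None: self._stream.cancel()

Cancelling the stream from stop() is what unblocks the iteration inside run(), so the thread exits promptly instead of waiting for one more event.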
-import grpc, logging, queue, threading
+import grpc, logging, queue, threading, time
 from common.proto.dlt_gateway_pb2 import DltRecordSubscription
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from dlt.connector.client.DltGatewayClient import DltGatewayClient
@@ -20,32 +20,36 @@ from dlt.connector.client.DltGatewayClient import DltGatewayClient
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
 
-class DltEventsCollector:
+class DltEventsCollector(threading.Thread):
     def __init__(
         self, dltgateway_client : DltGatewayClient,
         log_events_received : bool = False,
     ) -> None:
-        self._events_queue = queue.Queue()
+        super().__init__(name='DltEventsCollector', daemon=True)
+        self._dltgateway_client = dltgateway_client
         self._log_events_received = log_events_received
-        subscription = DltRecordSubscription() # bu default subscribe to all
-        self._dltgateway_stream = dltgateway_client.SubscribeToDlt(subscription)
-        self._dltgateway_thread = self._create_collector_thread(self._dltgateway_stream)
-
-    def _create_collector_thread(self, stream, as_daemon : bool = False):
-        return threading.Thread(target=self._collect, args=(stream,), daemon=as_daemon)
-
-    def _collect(self, events_stream) -> None:
-        try:
-            for event in events_stream:
-                if self._log_events_received:
-                    LOGGER.info('[_collect] event: {:s}'.format(grpc_message_to_json_string(event)))
-                self._events_queue.put_nowait(event)
-        except grpc.RpcError as e:
-            if e.code() != grpc.StatusCode.CANCELLED: # pylint: disable=no-member
-                raise # pragma: no cover
+        self._events_queue = queue.Queue()
+        self._terminate = threading.Event()
+        self._dltgateway_stream = None
 
-    def start(self):
-        if self._dltgateway_thread is not None: self._dltgateway_thread.start()
+    def run(self) -> None:
+        self._terminate.clear()
+        while not self._terminate.is_set():
+            try:
+                subscription = DltRecordSubscription() # by default, subscribe to all
+                self._dltgateway_stream = self._dltgateway_client.SubscribeToDlt(subscription)
+                for event in self._dltgateway_stream:
+                    if self._log_events_received:
+                        LOGGER.info('[run] event: {:s}'.format(grpc_message_to_json_string(event)))
+                    self._events_queue.put_nowait(event)
+            except grpc.RpcError as e:
+                if e.code() == grpc.StatusCode.UNAVAILABLE: # pylint: disable=no-member
+                    time.sleep(0.5)
+                    continue
+                elif e.code() == grpc.StatusCode.CANCELLED: # pylint: disable=no-member
+                    break
+                else:
+                    raise # pragma: no cover
 
     def get_event(self, block : bool = True, timeout : float = 0.1):
         try:
@@ -68,5 +72,5 @@ class DltEventsCollector:
         return sorted(events, key=lambda e: e.event.timestamp.timestamp)
 
     def stop(self):
+        self._terminate.set()
         if self._dltgateway_stream is not None: self._dltgateway_stream.cancel()
-        if self._dltgateway_thread is not None: self._dltgateway_thread.join()
diff --git a/src/dlt/connector/client/DltGatewayClient.py b/src/dlt/connector/client/DltGatewayClient.py
index f1f8dec391bb836cea33422176730d250090429d..e2f5530f9a971d0a25cac042d361c52db5c16304 100644
--- a/src/dlt/connector/client/DltGatewayClient.py
+++ b/src/dlt/connector/client/DltGatewayClient.py
@@ -14,14 +14,13 @@
 from typing import Iterator
 import grpc, logging
-from common.Constants import ServiceNameEnum
-from common.Settings import get_service_host, get_service_port_grpc
 from common.proto.context_pb2 import Empty, TeraFlowController
 from common.proto.dlt_gateway_pb2 import (
     DltPeerStatus, DltPeerStatusList, DltRecord, DltRecordEvent, DltRecordId,
     DltRecordStatus, DltRecordSubscription)
 from
common.proto.dlt_gateway_pb2_grpc import DltGatewayServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string +from dlt.connector.Config import DLT_GATEWAY_HOST, DLT_GATEWAY_PORT LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 @@ -30,8 +29,8 @@ RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, class DltGatewayClient: def __init__(self, host=None, port=None): - if not host: host = get_service_host(ServiceNameEnum.DLT) - if not port: port = get_service_port_grpc(ServiceNameEnum.DLT) + if not host: host = DLT_GATEWAY_HOST + if not port: port = DLT_GATEWAY_PORT self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) LOGGER.debug('Creating channel to {:s}...'.format(self.endpoint)) self.channel = None diff --git a/src/dlt/connector/main_test.py b/src/dlt/connector/main_test.py index 4ad90eb35444b7ba4de00159372e466e8fc68905..a877a5ce39a29dd8bf37416868d9c5a701912259 100644 --- a/src/dlt/connector/main_test.py +++ b/src/dlt/connector/main_test.py @@ -1,8 +1,11 @@ # pip install grpcio==1.47.0 grpcio-tools==1.47.0 protobuf==3.20.1 +# PYTHONPATH=./src python # PYTHONPATH=/home/cttc/teraflow/src python -m dlt.connector.main_test import logging, sys, time -from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, DltRecord +from common.proto.dlt_gateway_pb2 import ( + DLTRECORDOPERATION_ADD, DLTRECORDOPERATION_UNDEFINED, DLTRECORDOPERATION_UPDATE, DLTRECORDTYPE_DEVICE, + DLTRECORDTYPE_UNDEFINED, DltRecord, DltRecordId) from common.tools.object_factory.Device import json_device from common.tools.grpc.Tools import grpc_message_to_json_string from src.common.proto.context_pb2 import DEVICEOPERATIONALSTATUS_ENABLED, Device @@ -12,13 +15,33 @@ from .client.DltEventsCollector import DltEventsCollector logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) +DLT_GATEWAY_HOST = '127.0.0.1' +DLT_GATEWAY_PORT = 30551 #50051 + +def record_found(record : DltRecord) -> bool: + found = True + found = found and (len(record.record_id.domain_uuid.uuid) > 0) + found = found and (record.record_id.type != DLTRECORDTYPE_UNDEFINED) + found = found and (len(record.record_id.record_uuid.uuid) > 0) + #found = found and (record.operation != DLTRECORDOPERATION_UNDEFINED) + found = found and (len(record.data_json) > 0) + return found + def main(): - dltgateway_client = DltGatewayClient(host='127.0.0.1', port=50051) + dltgateway_client = DltGatewayClient(host=DLT_GATEWAY_HOST, port=DLT_GATEWAY_PORT) dltgateway_collector = DltEventsCollector(dltgateway_client, log_events_received=True) dltgateway_collector.start() time.sleep(3) + # Check record exists + dri = DltRecordId() + dri.domain_uuid.uuid = 'non-existing-domain' + dri.record_uuid.uuid = 'non-existing-record' + dri.type = DLTRECORDTYPE_DEVICE + reply = dltgateway_client.GetFromDlt(dri) + assert not record_found(reply), 'Record should not exist' + device = Device(**json_device('dev-1', 'packet-router', DEVICEOPERATIONALSTATUS_ENABLED)) r2dlt_req = DltRecord() diff --git a/src/dlt/connector/service/DltConnector.py b/src/dlt/connector/service/DltConnector.py deleted file mode 100644 index 0c42d66852e8eb895a07c761f7535a0d768a9e91..0000000000000000000000000000000000000000 --- a/src/dlt/connector/service/DltConnector.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging, threading -from common.tools.grpc.Tools import grpc_message_to_json_string -from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from dlt.connector.client.DltConnectorClient import DltConnectorClient - -LOGGER = logging.getLogger(__name__) - -class DltConnector: - def __init__(self) -> None: - LOGGER.debug('Creating connector...') - self._terminate = threading.Event() - self._thread = None - LOGGER.debug('Connector created') - - def start(self): - self._terminate.clear() - self._thread = threading.Thread(target=self._run_events_collector) - self._thread.start() - - def _run_events_collector(self) -> None: - dltconnector_client = DltConnectorClient() - context_client = ContextClient() - events_collector = EventsCollector(context_client) - events_collector.start() - - while not self._terminate.is_set(): - event = events_collector.get_event() - LOGGER.info('Event from Context Received: {:s}'.format(grpc_message_to_json_string(event))) - - events_collector.stop() - context_client.close() - dltconnector_client.close() - - def stop(self): - self._terminate.set() - self._thread.join() diff --git a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py index 860e46f3ab88b097f4aa8e06508b19518055e46f..6c5401cb1724f8a759001d790e835ab78ce4c6c6 100644 --- a/src/dlt/connector/service/DltConnectorServiceServicerImpl.py +++ b/src/dlt/connector/service/DltConnectorServiceServicerImpl.py @@ -13,9 +13,15 @@ # limitations under the License. 
import grpc, logging -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.proto.context_pb2 import DeviceId, Empty, ServiceId, SliceId +from common.proto.context_pb2 import DeviceId, Empty, LinkId, ServiceId, SliceId, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId from common.proto.dlt_connector_pb2_grpc import DltConnectorServiceServicer +from common.proto.dlt_gateway_pb2 import DltRecord, DltRecordId, DltRecordOperationEnum, DltRecordTypeEnum +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltGatewayClient import DltGatewayClient +from .tools.Checkers import record_exists LOGGER = logging.getLogger(__name__) @@ -23,6 +29,7 @@ SERVICE_NAME = 'DltConnector' METHOD_NAMES = [ 'RecordAll', 'RecordAllDevices', 'RecordDevice', + 'RecordAllLinks', 'RecordLink', 'RecordAllServices', 'RecordService', 'RecordAllSlices', 'RecordSlice', ] @@ -34,29 +41,121 @@ class DltConnectorServiceServicerImpl(DltConnectorServiceServicer): LOGGER.debug('Servicer Created') @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAll(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAll(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordAllDevices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: + return Empty() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RecordDevice(self, request : DltDeviceId, context : grpc.ServicerContext) -> Empty: + context_client = ContextClient() + device = context_client.GetDevice(request.device_id) + + dltgateway_client = DltGatewayClient() + + dlt_record_id = DltRecordId() + dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid + dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_DEVICE + dlt_record_id.record_uuid.uuid = device.device_id.device_uuid.uuid + + LOGGER.info('[RecordDevice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + LOGGER.info('[RecordDevice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + + exists = record_exists(dlt_record) + LOGGER.info('[RecordDevice] exists = {:s}'.format(str(exists))) + + dlt_record = DltRecord() + dlt_record.record_id.CopyFrom(dlt_record_id) + dlt_record.operation = \ + DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ + if exists else \ + DltRecordOperationEnum.DLTRECORDOPERATION_ADD + + dlt_record.data_json = grpc_message_to_json_string(device) + LOGGER.info('[RecordDevice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + LOGGER.info('[RecordDevice] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAllDevices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAllLinks(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordDevice(self, request : DeviceId, context : grpc.ServicerContext) -> Empty: + def RecordLink(self, request : DltLinkId, context 
: grpc.ServicerContext) -> Empty: + context_client = ContextClient() + link = context_client.GetLink(request.link_id) + + dltgateway_client = DltGatewayClient() + + dlt_record_id = DltRecordId() + dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid + dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_LINK + dlt_record_id.record_uuid.uuid = link.link_id.link_uuid.uuid + + LOGGER.info('[RecordLink] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + LOGGER.info('[RecordLink] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + + exists = record_exists(dlt_record) + LOGGER.info('[RecordLink] exists = {:s}'.format(str(exists))) + + dlt_record = DltRecord() + dlt_record.record_id.CopyFrom(dlt_record_id) + dlt_record.operation = \ + DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ + if exists else \ + DltRecordOperationEnum.DLTRECORDOPERATION_ADD + + dlt_record.data_json = grpc_message_to_json_string(link) + LOGGER.info('[RecordLink] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + LOGGER.info('[RecordLink] recv dlt_record_status = {:s}'.format(grpc_message_to_json_string(dlt_record_status))) return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAllServices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAllServices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: + def RecordService(self, request : DltServiceId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordAllSlices(self, request : Empty, context : grpc.ServicerContext) -> Empty: + def RecordAllSlices(self, request : TopologyId, context : grpc.ServicerContext) -> Empty: return Empty() @safe_and_metered_rpc_method(METRICS, LOGGER) - def RecordSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: + def RecordSlice(self, request : DltSliceId, context : grpc.ServicerContext) -> Empty: + context_client = ContextClient() + slice_ = context_client.GetSlice(request.slice_id) + + dltgateway_client = DltGatewayClient() + + dlt_record_id = DltRecordId() + dlt_record_id.domain_uuid.uuid = request.topology_id.topology_uuid.uuid + dlt_record_id.type = DltRecordTypeEnum.DLTRECORDTYPE_SLICE + dlt_record_id.record_uuid.uuid = slice_.slice_id.slice_uuid.uuid + + LOGGER.info('[RecordSlice] sent dlt_record_id = {:s}'.format(grpc_message_to_json_string(dlt_record_id))) + dlt_record = dltgateway_client.GetFromDlt(dlt_record_id) + LOGGER.info('[RecordSlice] recv dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + + exists = record_exists(dlt_record) + LOGGER.info('[RecordSlice] exists = {:s}'.format(str(exists))) + + dlt_record = DltRecord() + dlt_record.record_id.CopyFrom(dlt_record_id) + dlt_record.operation = \ + DltRecordOperationEnum.DLTRECORDOPERATION_UPDATE \ + if exists else \ + DltRecordOperationEnum.DLTRECORDOPERATION_ADD + + dlt_record.data_json = grpc_message_to_json_string(slice_) + LOGGER.info('[RecordSlice] sent dlt_record = {:s}'.format(grpc_message_to_json_string(dlt_record))) + dlt_record_status = dltgateway_client.RecordToDlt(dlt_record) + LOGGER.info('[RecordSlice] recv dlt_record_status = 
{:s}'.format(grpc_message_to_json_string(dlt_record_status))) return Empty() diff --git a/src/dlt/connector/service/__main__.py b/src/dlt/connector/service/__main__.py index 435a93f61bf934a17d9c044756648176e9cb2d2d..76e7bc6f1bb1b50e736327d8f08c0880e45c6835 100644 --- a/src/dlt/connector/service/__main__.py +++ b/src/dlt/connector/service/__main__.py @@ -18,6 +18,7 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables) +from .event_dispatcher.DltEventDispatcher import DltEventDispatcher from .DltConnectorService import DltConnectorService terminate = threading.Event() @@ -31,7 +32,7 @@ def main(): global LOGGER # pylint: disable=global-statement log_level = get_log_level() - logging.basicConfig(level=log_level) + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) wait_for_environment_variables([ @@ -48,6 +49,10 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) + # Starting DLT event dispatcher + event_dispatcher = DltEventDispatcher() + event_dispatcher.start() + # Starting DLT connector service grpc_service = DltConnectorService() grpc_service.start() @@ -57,6 +62,8 @@ def main(): LOGGER.info('Terminating...') grpc_service.stop() + event_dispatcher.stop() + event_dispatcher.join() LOGGER.info('Bye') return 0 diff --git a/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..8973ae621c1291f8ed6e2673f0c64b59712143ee --- /dev/null +++ b/src/dlt/connector/service/event_dispatcher/DltEventDispatcher.py @@ -0,0 +1,209 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
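+
+# This module implements the DLT event dispatcher: it consumes the DLT gateway
+# event stream and mirrors records published by remote domains into the local
+# Context. Remote devices and links are stored locally and attached to the
+# inter-domain topology; slices targeting a local domain are provisioned through
+# the Interdomain service and the result is recorded back to the DLT, while
+# slices owned by this domain are just updated locally.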
+ +import grpc, json, logging, threading +from typing import Any, Dict, Set +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.proto.context_pb2 import ContextId, Device, EventTypeEnum, Link, Slice, TopologyId +from common.proto.dlt_connector_pb2 import DltSliceId +from common.proto.dlt_gateway_pb2 import DltRecordEvent, DltRecordOperationEnum, DltRecordTypeEnum +from common.tools.context_queries.Context import create_context +from common.tools.context_queries.Device import add_device_to_topology +from common.tools.context_queries.Link import add_link_to_topology +from common.tools.context_queries.Topology import create_topology +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from dlt.connector.client.DltEventsCollector import DltEventsCollector +from dlt.connector.client.DltGatewayClient import DltGatewayClient +from interdomain.client.InterdomainClient import InterdomainClient + +LOGGER = logging.getLogger(__name__) + +GET_EVENT_TIMEOUT = 0.5 + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) + +class Clients: + def __init__(self) -> None: + self.context_client = ContextClient() + self.dlt_connector_client = DltConnectorClient() + self.dlt_gateway_client = DltGatewayClient() + self.interdomain_client = InterdomainClient() + + def close(self) -> None: + self.interdomain_client.close() + self.dlt_gateway_client.close() + self.dlt_connector_client.close() + self.context_client.close() + +class DltEventDispatcher(threading.Thread): + def __init__(self) -> None: + LOGGER.debug('Creating connector...') + super().__init__(name='DltEventDispatcher', daemon=True) + self._terminate = threading.Event() + LOGGER.debug('Connector created') + + def start(self) -> None: + self._terminate.clear() + return super().start() + + def stop(self): + self._terminate.set() + + def run(self) -> None: + clients = Clients() + create_context(clients.context_client, DEFAULT_CONTEXT_UUID) + create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID) + create_topology(clients.context_client, DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID) + + dlt_events_collector = DltEventsCollector(clients.dlt_gateway_client, log_events_received=True) + dlt_events_collector.start() + + while not self._terminate.is_set(): + event = dlt_events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT) + if event is None: continue + + existing_topology_ids = clients.context_client.ListTopologyIds(ADMIN_CONTEXT_ID) + local_domain_uuids = { + topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids + } + local_domain_uuids.discard(DEFAULT_TOPOLOGY_UUID) + local_domain_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + + self.dispatch_event(clients, local_domain_uuids, event) + + dlt_events_collector.stop() + clients.close() + + def dispatch_event(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None: + record_type : DltRecordTypeEnum = event.record_id.type # {UNDEFINED/CONTEXT/TOPOLOGY/DEVICE/LINK/SERVICE/SLICE} + if record_type == DltRecordTypeEnum.DLTRECORDTYPE_DEVICE: + self._dispatch_device(clients, local_domain_uuids, event) + elif record_type == DltRecordTypeEnum.DLTRECORDTYPE_LINK: + 
self._dispatch_link(clients, local_domain_uuids, event)
+        elif record_type == DltRecordTypeEnum.DLTRECORDTYPE_SLICE:
+            self._dispatch_slice(clients, local_domain_uuids, event)
+        else:
+            raise NotImplementedError('EventType: {:s}'.format(grpc_message_to_json_string(event)))
+
+    def _dispatch_device(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        domain_uuid : str = event.record_id.domain_uuid.uuid
+
+        if domain_uuid in local_domain_uuids:
+            MSG = '[_dispatch_device] Ignoring DLT event received (local): {:s}'
+            LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+            return
+
+        MSG = '[_dispatch_device] DLT event received (remote): {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+
+        event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE}
+        if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+            LOGGER.info('[_dispatch_device] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id)))
+            record = clients.dlt_gateway_client.GetFromDlt(event.record_id)
+            LOGGER.info('[_dispatch_device] record={:s}'.format(grpc_message_to_json_string(record)))
+
+            create_context(clients.context_client, domain_uuid)
+            create_topology(clients.context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID)
+            device = Device(**json.loads(record.data_json))
+            clients.context_client.SetDevice(device)
+            device_uuid = device.device_id.device_uuid.uuid # pylint: disable=no-member
+            add_device_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, device_uuid)
+            domain_context_id = ContextId(**json_context_id(domain_uuid))
+            add_device_to_topology(clients.context_client, domain_context_id, DEFAULT_TOPOLOGY_UUID, device_uuid)
+        elif event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
+            raise NotImplementedError('Delete Device')
+
+    def _dispatch_link(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        domain_uuid : str = event.record_id.domain_uuid.uuid
+
+        if domain_uuid in local_domain_uuids:
+            MSG = '[_dispatch_link] Ignoring DLT event received (local): {:s}'
+            LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+            return
+
+        MSG = '[_dispatch_link] DLT event received (remote): {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+
+        event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE}
+        if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+            LOGGER.info('[_dispatch_link] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id)))
+            record = clients.dlt_gateway_client.GetFromDlt(event.record_id)
+            LOGGER.info('[_dispatch_link] record={:s}'.format(grpc_message_to_json_string(record)))
+
+            link = Link(**json.loads(record.data_json))
+            clients.context_client.SetLink(link)
+            link_uuid = link.link_id.link_uuid.uuid # pylint: disable=no-member
+            add_link_to_topology(clients.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID, link_uuid)
+        elif event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
+            raise NotImplementedError('Delete Link')
+
+    def _dispatch_slice(self, clients : Clients, local_domain_uuids : Set[str], event : DltRecordEvent) -> None:
+        event_type : EventTypeEnum = event.event.event_type # {UNDEFINED/CREATE/UPDATE/REMOVE}
+        domain_uuid : str = event.record_id.domain_uuid.uuid
+
+        LOGGER.info('[_dispatch_slice] event.record_id={:s}'.format(grpc_message_to_json_string(event.record_id)))
+        record = clients.dlt_gateway_client.GetFromDlt(event.record_id)
+        LOGGER.info('[_dispatch_slice] record={:s}'.format(grpc_message_to_json_string(record)))
+
+        slice_ = Slice(**json.loads(record.data_json))
+
+        context_uuid = slice_.slice_id.context_id.context_uuid.uuid
+        owner_uuid = slice_.slice_owner.owner_uuid.uuid
+        create_context(clients.context_client, context_uuid)
+        create_topology(clients.context_client, context_uuid, DEFAULT_TOPOLOGY_UUID)
+
+        if domain_uuid in local_domain_uuids:
+            # it is for "me"
+            if event_type in {EventTypeEnum.EVENTTYPE_CREATE, EventTypeEnum.EVENTTYPE_UPDATE}:
+                try:
+                    db_slice = clients.context_client.GetSlice(slice_.slice_id)
+                    # exists
+                    db_json_slice = grpc_message_to_json_string(db_slice)
+                except grpc.RpcError:
+                    # not exists
+                    db_json_slice = None
+
+                _json_slice = grpc_message_to_json_string(slice_)
+                if db_json_slice != _json_slice:
+                    # not exists or is different...
+                    slice_id = clients.interdomain_client.RequestSlice(slice_)
+                    topology_id = TopologyId(**json_topology_id(domain_uuid))
+                    dlt_slice_id = DltSliceId()
+                    dlt_slice_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member
+                    dlt_slice_id.slice_id.CopyFrom(slice_id) # pylint: disable=no-member
+                    clients.dlt_connector_client.RecordSlice(dlt_slice_id)
+
+            elif event_type in {EventTypeEnum.EVENTTYPE_REMOVE}:
+                raise NotImplementedError('Delete Slice')
+        elif owner_uuid in local_domain_uuids:
+            # it is owned by me
+            # just update it locally
+            LOGGER.info('[_dispatch_slice] updating locally')
+
+            local_slice = Slice()
+            local_slice.CopyFrom(slice_)
+
+            # pylint: disable=no-member
+            del local_slice.slice_service_ids[:]  # they are from remote domains so will not be present locally
+            del local_slice.slice_subslice_ids[:] # they are from remote domains so will not be present locally
+
+            clients.context_client.SetSlice(local_slice)
+        else:
+            MSG = '[_dispatch_slice] Ignoring DLT event received (remote): {:s}'
+            LOGGER.info(MSG.format(grpc_message_to_json_string(event)))
+
diff --git a/src/dlt/connector/service/event_dispatcher/__init__.py b/src/dlt/connector/service/event_dispatcher/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a
--- /dev/null
+++ b/src/dlt/connector/service/event_dispatcher/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/dlt/connector/service/tools/Checkers.py b/src/dlt/connector/service/tools/Checkers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e25d8d5a5068ee927088697ad3453fba99a1f316
--- /dev/null
+++ b/src/dlt/connector/service/tools/Checkers.py
@@ -0,0 +1,24 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.dlt_gateway_pb2 import DLTRECORDOPERATION_UNDEFINED, DLTRECORDTYPE_UNDEFINED, DltRecord + +def record_exists(record : DltRecord) -> bool: + exists = True + exists = exists and (len(record.record_id.domain_uuid.uuid) > 0) + exists = exists and (record.record_id.type != DLTRECORDTYPE_UNDEFINED) + exists = exists and (len(record.record_id.record_uuid.uuid) > 0) + #exists = exists and (record.operation != DLTRECORDOPERATION_UNDEFINED) + exists = exists and (len(record.data_json) > 0) + return exists diff --git a/src/dlt/connector/service/tools/__init__.py b/src/dlt/connector/service/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/dlt/connector/service/tools/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/interdomain/Dockerfile b/src/interdomain/Dockerfile index 388fcb76d08b49fdbc20baa3fb0d1ae957fdd46f..ee1071896d0ab0838a2126a2abb9a77278461573 100644 --- a/src/interdomain/Dockerfile +++ b/src/interdomain/Dockerfile @@ -63,10 +63,12 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory WORKDIR /var/teraflow COPY src/context/. context/ -COPY src/device/. device/ +#COPY src/device/. device/ +COPY src/dlt/. dlt/ COPY src/interdomain/. interdomain/ -COPY src/monitoring/. monitoring/ -COPY src/service/. service/ +#COPY src/monitoring/. monitoring/ +COPY src/pathcomp/. pathcomp/ +#COPY src/service/. service/ COPY src/slice/. slice/ # Start the service diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index 01ba90ef5a6cb098e6d419fa0d6abb450893f8c6..a178095aeee81c3e6407cf1c6706b047fd1c65fc 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -12,15 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
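The rewritten RequestSlice below iterates over two intermediate structures whose shapes are only visible in the loop headers. A hedged, self-contained illustration of those shapes (all device, endpoint, and domain names are made up, following the 'R4@D1'/'2/1' style of the hard-coded example being removed):

    from common.proto.context_pb2 import EndPointId

    def _ep(device_uuid, endpoint_uuid):
        # Build an EndPointId the way the servicer reads them back:
        # endpoint_id.device_id.device_uuid.uuid / endpoint_id.endpoint_uuid.uuid
        ep_id = EndPointId()
        ep_id.device_id.device_uuid.uuid = device_uuid
        ep_id.endpoint_uuid.uuid = endpoint_uuid
        return ep_id

    # compute_interdomain_path(...) is unpacked as (device_uuid, [EndPointId, ...]) pairs:
    interdomain_path = [
        ('D1', [_ep('R1@D1', 'eth0'), _ep('R4@D1', '2/1')]),
        ('D2', [_ep('R1@D2', '2/1'), _ep('R3@D2', 'eth0')]),
    ]

    # compute_traversed_domains(...) is unpacked as (domain_uuid, is_local_domain, [EndPointId, ...]):
    traversed_domains = [
        ('D1', True,  interdomain_path[0][1]),
        ('D2', False, interdomain_path[1][1]),
    ]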
-import grpc, logging -from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.proto.context_pb2 import ( - AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController) +import grpc, logging, uuid +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer -#from common.tools.grpc.Tools import grpc_message_to_json_string +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.context_queries.Context import create_context +from common.tools.context_queries.InterDomain import ( + compute_interdomain_path, compute_traversed_domains, get_local_device_uuids, is_inter_domain, is_multi_domain) +from common.tools.context_queries.Topology import create_topology +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from interdomain.service.RemoteDomainClients import RemoteDomainClients +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from interdomain.service.topology_abstractor.DltRecordSender import DltRecordSender +from pathcomp.frontend.client.PathCompClient import PathCompClient from slice.client.SliceClient import SliceClient +from .RemoteDomainClients import RemoteDomainClients +from .Tools import compose_slice, compute_slice_owner, map_abstract_endpoints_to_real LOGGER = logging.getLogger(__name__) @@ -37,89 +46,92 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: context_client = ContextClient() + pathcomp_client = PathCompClient() slice_client = SliceClient() - - domains_to_endpoints = {} - local_domain_uuid = None - for slice_endpoint_id in request.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - domain_uuid = device_uuid.split('@')[1] - endpoints = domains_to_endpoints.setdefault(domain_uuid, []) - endpoints.append(slice_endpoint_id) - if local_domain_uuid is None: local_domain_uuid = domain_uuid + dlt_connector_client = DltConnectorClient() + + local_device_uuids = get_local_device_uuids(context_client) + slice_owner_uuid = request.slice_owner.owner_uuid.uuid + not_inter_domain = not is_inter_domain(context_client, request.slice_endpoint_ids) + no_slice_owner = len(slice_owner_uuid) == 0 + is_local_slice_owner = slice_owner_uuid in local_device_uuids + if not_inter_domain and (no_slice_owner or is_local_slice_owner): + str_slice = grpc_message_to_json_string(request) + raise Exception('InterDomain can only handle inter-domain slice requests: {:s}'.format(str_slice)) + + interdomain_path = compute_interdomain_path(pathcomp_client, request) + str_interdomain_path = [ + [device_uuid, [ + (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) + for endpoint_id in endpoint_ids + ]] + for device_uuid, endpoint_ids in interdomain_path + ] + LOGGER.info('interdomain_path={:s}'.format(str(str_interdomain_path))) + + traversed_domains = compute_traversed_domains(context_client, interdomain_path) + str_traversed_domains = [ + (domain_uuid, is_local_domain, [ + (endpoint_id.device_id.device_uuid.uuid, 
endpoint_id.endpoint_uuid.uuid) + for endpoint_id in endpoint_ids + ]) + for domain_uuid,is_local_domain,endpoint_ids in traversed_domains + ] + LOGGER.info('traversed_domains={:s}'.format(str(str_traversed_domains))) + + slice_owner_uuid = compute_slice_owner(context_client, traversed_domains) + LOGGER.info('slice_owner_uuid={:s}'.format(str(slice_owner_uuid))) + if slice_owner_uuid is None: + raise Exception('Unable to identify slice owner') reply = Slice() reply.CopyFrom(request) - # decompose remote slices - for domain_uuid, slice_endpoint_ids in domains_to_endpoints.items(): - if domain_uuid == local_domain_uuid: continue - - remote_slice_request = Slice() - remote_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - remote_slice_request.slice_id.slice_uuid.uuid = \ - request.slice_id.slice_uuid.uuid + ':subslice@' + local_domain_uuid - remote_slice_request.slice_status.slice_status = request.slice_status.slice_status - for endpoint_id in slice_endpoint_ids: - slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = endpoint_id.device_id.device_uuid.uuid - slice_endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid - - # add endpoint connecting to remote domain - if domain_uuid == 'D1': - slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - elif domain_uuid == 'D2': - slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - - interdomain_client = self.remote_domain_clients.get_peer('remote-teraflow') - remote_slice_reply = interdomain_client.LookUpSlice(remote_slice_request) - if remote_slice_reply == remote_slice_request.slice_id: # pylint: disable=no-member - # successful case - remote_slice = interdomain_client.OrderSliceFromCatalog(remote_slice_request) - if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: - raise Exception('Remote Slice creation failed. Wrong Slice status returned') + dlt_record_sender = DltRecordSender(context_client, dlt_connector_client) + + for domain_uuid, is_local_domain, endpoint_ids in traversed_domains: + if is_local_domain: + slice_uuid = str(uuid.uuid4()) + LOGGER.info('[loop] [local] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( + str(domain_uuid), str(is_local_domain), str(slice_uuid))) + + # local slices always in DEFAULT_CONTEXT_UUID + #context_uuid = request.slice_id.context_id.context_uuid.uuid + context_uuid = DEFAULT_CONTEXT_UUID + endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids) + sub_slice = compose_slice( + context_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, + config_rules=request.slice_config.config_rules) + LOGGER.info('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) + sub_slice_id = slice_client.CreateSlice(sub_slice) else: - # not in catalog - remote_slice = interdomain_client.CreateSliceAndAddToCatalog(remote_slice_request) - if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: - raise Exception('Remote Slice creation failed. 
Wrong Slice status returned') - - #context_client.SetSlice(remote_slice) - #subslice_id = reply.slice_subslice_ids.add() - #subslice_id.CopyFrom(remote_slice.slice_id) - - local_slice_request = Slice() - local_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - local_slice_request.slice_id.slice_uuid.uuid = request.slice_id.slice_uuid.uuid + ':subslice' - local_slice_request.slice_status.slice_status = request.slice_status.slice_status - for endpoint_id in domains_to_endpoints[local_domain_uuid]: - slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.CopyFrom(endpoint_id) - - # add endpoint connecting to remote domain - if local_domain_uuid == 'D1': - slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - elif local_domain_uuid == 'D2': - slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() - slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' - slice_endpoint_id.endpoint_uuid.uuid = '2/1' - - local_slice_reply = slice_client.CreateSlice(local_slice_request) - if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member - raise Exception('Local Slice creation failed. Wrong Slice Id was returned') - - subslice_id = reply.slice_subslice_ids.add() - subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid - subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid - - context_client.SetSlice(reply) - return reply.slice_id + slice_uuid = request.slice_id.slice_uuid.uuid + LOGGER.info('[loop] [remote] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( + str(domain_uuid), str(is_local_domain), str(slice_uuid))) + + # create context/topology for the remote domains where we are creating slices + create_context(context_client, domain_uuid) + create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_UUID) + sub_slice = compose_slice( + domain_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, + config_rules=request.slice_config.config_rules, owner_uuid=slice_owner_uuid) + LOGGER.info('[loop] [remote] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) + sub_slice_id = context_client.SetSlice(sub_slice) + topology_id = TopologyId(**json_topology_id(domain_uuid)) + dlt_record_sender.add_slice(topology_id, sub_slice) + + LOGGER.info('[loop] adding sub-slice') + reply.slice_subslice_ids.add().CopyFrom(sub_slice_id) # pylint: disable=no-member + + LOGGER.info('Recording Remote Slice requests to DLT') + dlt_record_sender.commit() + + LOGGER.info('Activating interdomain slice') + reply.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member + + LOGGER.info('Updating interdomain slice') + slice_id = context_client.SetSlice(reply) + return slice_id @safe_and_metered_rpc_method(METRICS, LOGGER) def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..fb6371603ea90437437541bb995a59813764d9ef --- /dev/null +++ b/src/interdomain/service/Tools.py @@ -0,0 +1,131 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import List, Optional, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.proto.context_pb2 import ( + ConfigRule, Constraint, ContextId, Device, Empty, EndPointId, Slice, SliceStatusEnum) +from common.tools.context_queries.CheckType import device_type_is_network, endpoint_type_is_border +from common.tools.context_queries.InterDomain import get_local_device_uuids +from common.tools.grpc.ConfigRules import copy_config_rules +from common.tools.grpc.Constraints import copy_constraints +from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +def compute_slice_owner( + context_client : ContextClient, traversed_domains : List[Tuple[str, Device, bool, List[EndPointId]]] +) -> Optional[str]: + traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains} + + existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + existing_topology_uuids = { + topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids + } + existing_topology_uuids.discard(DEFAULT_TOPOLOGY_UUID) + existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_UUID) + + candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids) + if len(candidate_owner_uuids) != 1: + data = { + 'traversed_domain_uuids' : [td_uuid for td_uuid in traversed_domain_uuids ], + 'existing_topology_uuids': [et_uuid for et_uuid in existing_topology_uuids], + 'candidate_owner_uuids' : [co_uuid for co_uuid in candidate_owner_uuids ], + } + LOGGER.warning('Unable to identify slice owner: {:s}'.format(json.dumps(data))) + return None + + return candidate_owner_uuids.pop() + +def compose_slice( + context_uuid : str, slice_uuid : str, endpoint_ids : List[EndPointId], constraints : List[Constraint] = [], + config_rules : List[ConfigRule] = [], owner_uuid : Optional[str] = None +) -> Slice: + slice_ = Slice() + slice_.slice_id.context_id.context_uuid.uuid = context_uuid # pylint: disable=no-member + slice_.slice_id.slice_uuid.uuid = slice_uuid # pylint: disable=no-member + slice_.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member + + if owner_uuid is not None: + slice_.slice_owner.owner_uuid.uuid = owner_uuid # pylint: disable=no-member + + if len(endpoint_ids) >= 2: + slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[0]) # pylint: disable=no-member + slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[-1]) # pylint: disable=no-member + + if len(constraints) > 0: + copy_constraints(constraints, slice_.slice_constraints) # pylint: disable=no-member + + if len(config_rules) > 0: + copy_config_rules(config_rules, slice_.slice_config.config_rules) # pylint: disable=no-member + + return slice_ + +def map_abstract_endpoints_to_real( + context_client : ContextClient, 
local_domain_uuid : str, abstract_endpoint_ids : List[EndPointId] +) -> List[EndPointId]: + + local_device_uuids = get_local_device_uuids(context_client) + all_devices = context_client.ListDevices(Empty()) + + map_endpoints_to_devices = dict() + for device in all_devices.devices: + LOGGER.info('[map_abstract_endpoints_to_real] Checking device {:s}'.format( + grpc_message_to_json_string(device))) + + if device_type_is_network(device.device_type): + LOGGER.info('[map_abstract_endpoints_to_real] Ignoring network device') + continue + device_uuid = device.device_id.device_uuid.uuid + if device_uuid not in local_device_uuids: + LOGGER.info('[map_abstract_endpoints_to_real] Ignoring non-local device') + continue + + for endpoint in device.device_endpoints: + LOGGER.info('[map_abstract_endpoints_to_real] Checking endpoint {:s}'.format( + grpc_message_to_json_string(endpoint))) + endpoint_id = endpoint.endpoint_id + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + map_endpoints_to_devices[(device_uuid, endpoint_uuid)] = endpoint_id + if endpoint_type_is_border(endpoint.endpoint_type): + map_endpoints_to_devices[(local_domain_uuid, endpoint_uuid)] = endpoint_id + + LOGGER.info('[map_abstract_endpoints_to_real] map_endpoints_to_devices={:s}'.format( + str({ + endpoint_tuple:grpc_message_to_json(endpoint_id) + for endpoint_tuple,endpoint_id in map_endpoints_to_devices.items() + }))) + + # map abstract device/endpoints to real device/endpoints + real_endpoint_ids = [] + for endpoint_id in abstract_endpoint_ids: + LOGGER.info('[map_abstract_endpoints_to_real] Mapping endpoint_id {:s} ...'.format( + grpc_message_to_json_string(endpoint_id))) + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + _endpoint_id = map_endpoints_to_devices.get((device_uuid, endpoint_uuid)) + if _endpoint_id is None: + LOGGER.warning('map_endpoints_to_devices={:s}'.format(str(map_endpoints_to_devices))) + MSG = 'Unable to map abstract EndPoint({:s}) to real one.' + raise Exception(MSG.format(grpc_message_to_json_string(endpoint_id))) + + LOGGER.info('[map_abstract_endpoints_to_real] ... 
to endpoint_id {:s}'.format( + grpc_message_to_json_string(_endpoint_id))) + real_endpoint_ids.append(_endpoint_id) + + return real_endpoint_ids diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py index c0a078f4ded85ab957011d21d56c97c8d303dc2a..bcbda8dfda05ec7b245b5939d8a3afc4b979562f 100644 --- a/src/interdomain/service/__main__.py +++ b/src/interdomain/service/__main__.py @@ -17,7 +17,8 @@ from prometheus_client import start_http_server from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, - get_service_port_grpc, wait_for_environment_variables) + wait_for_environment_variables) +from .topology_abstractor.TopologyAbstractor import TopologyAbstractor from .InterdomainService import InterdomainService from .RemoteDomainClients import RemoteDomainClients @@ -32,14 +33,18 @@ def main(): global LOGGER # pylint: disable=global-statement log_level = get_log_level() - logging.basicConfig(level=log_level) + logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) wait_for_environment_variables([ - get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), - get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), - get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), - get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) signal.signal(signal.SIGINT, signal_handler) @@ -58,14 +63,19 @@ def main(): grpc_service = InterdomainService(remote_domain_clients) grpc_service.start() + # Subscribe to Context Events + topology_abstractor = TopologyAbstractor() + topology_abstractor.start() + # TODO: improve with configuration the definition of the remote peers - interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN) - remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc) + #interdomain_service_port_grpc = get_service_port_grpc(ServiceNameEnum.INTERDOMAIN) + #remote_domain_clients.add_peer('remote-teraflow', 'remote-teraflow', interdomain_service_port_grpc) # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=0.1): pass LOGGER.info('Terminating...') + topology_abstractor.stop() grpc_service.stop() LOGGER.info('Bye') diff --git a/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..01ba90ef5a6cb098e6d419fa0d6abb450893f8c6 --- /dev/null +++ b/src/interdomain/service/_old_code/InterdomainServiceServicerImpl.py @@ -0,0 +1,153 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.proto.context_pb2 import ( + AuthenticationResult, Slice, SliceId, SliceStatus, SliceStatusEnum, TeraFlowController) +from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer +#from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from interdomain.service.RemoteDomainClients import RemoteDomainClients +from slice.client.SliceClient import SliceClient + +LOGGER = logging.getLogger(__name__) + +SERVICE_NAME = 'Interdomain' +METHOD_NAMES = ['RequestSlice', 'Authenticate', 'LookUpSlice', 'OrderSliceFromCatalog', 'CreateSliceAndAddToCatalog'] +METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) + +class InterdomainServiceServicerImpl(InterdomainServiceServicer): + def __init__(self, remote_domain_clients : RemoteDomainClients): + LOGGER.debug('Creating Servicer...') + self.remote_domain_clients = remote_domain_clients + LOGGER.debug('Servicer Created') + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def RequestSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: + context_client = ContextClient() + slice_client = SliceClient() + + domains_to_endpoints = {} + local_domain_uuid = None + for slice_endpoint_id in request.slice_endpoint_ids: + device_uuid = slice_endpoint_id.device_id.device_uuid.uuid + domain_uuid = device_uuid.split('@')[1] + endpoints = domains_to_endpoints.setdefault(domain_uuid, []) + endpoints.append(slice_endpoint_id) + if local_domain_uuid is None: local_domain_uuid = domain_uuid + + reply = Slice() + reply.CopyFrom(request) + + # decompose remote slices + for domain_uuid, slice_endpoint_ids in domains_to_endpoints.items(): + if domain_uuid == local_domain_uuid: continue + + remote_slice_request = Slice() + remote_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + remote_slice_request.slice_id.slice_uuid.uuid = \ + request.slice_id.slice_uuid.uuid + ':subslice@' + local_domain_uuid + remote_slice_request.slice_status.slice_status = request.slice_status.slice_status + for endpoint_id in slice_endpoint_ids: + slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = endpoint_id.device_id.device_uuid.uuid + slice_endpoint_id.endpoint_uuid.uuid = endpoint_id.endpoint_uuid.uuid + + # add endpoint connecting to remote domain + if domain_uuid == 'D1': + slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + elif domain_uuid == 'D2': + slice_endpoint_id = remote_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + + interdomain_client = self.remote_domain_clients.get_peer('remote-teraflow') + remote_slice_reply = interdomain_client.LookUpSlice(remote_slice_request) + if 
remote_slice_reply == remote_slice_request.slice_id: # pylint: disable=no-member + # successful case + remote_slice = interdomain_client.OrderSliceFromCatalog(remote_slice_request) + if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: + raise Exception('Remote Slice creation failed. Wrong Slice status returned') + else: + # not in catalog + remote_slice = interdomain_client.CreateSliceAndAddToCatalog(remote_slice_request) + if remote_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: + raise Exception('Remote Slice creation failed. Wrong Slice status returned') + + #context_client.SetSlice(remote_slice) + #subslice_id = reply.slice_subslice_ids.add() + #subslice_id.CopyFrom(remote_slice.slice_id) + + local_slice_request = Slice() + local_slice_request.slice_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + local_slice_request.slice_id.slice_uuid.uuid = request.slice_id.slice_uuid.uuid + ':subslice' + local_slice_request.slice_status.slice_status = request.slice_status.slice_status + for endpoint_id in domains_to_endpoints[local_domain_uuid]: + slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.CopyFrom(endpoint_id) + + # add endpoint connecting to remote domain + if local_domain_uuid == 'D1': + slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R4@D1' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + elif local_domain_uuid == 'D2': + slice_endpoint_id = local_slice_request.slice_endpoint_ids.add() + slice_endpoint_id.device_id.device_uuid.uuid = 'R1@D2' + slice_endpoint_id.endpoint_uuid.uuid = '2/1' + + local_slice_reply = slice_client.CreateSlice(local_slice_request) + if local_slice_reply != local_slice_request.slice_id: # pylint: disable=no-member + raise Exception('Local Slice creation failed. Wrong Slice Id was returned') + + subslice_id = reply.slice_subslice_ids.add() + subslice_id.context_id.context_uuid.uuid = local_slice_request.slice_id.context_id.context_uuid.uuid + subslice_id.slice_uuid.uuid = local_slice_request.slice_id.slice_uuid.uuid + + context_client.SetSlice(reply) + return reply.slice_id + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def Authenticate(self, request : TeraFlowController, context : grpc.ServicerContext) -> AuthenticationResult: + auth_result = AuthenticationResult() + auth_result.context_id.CopyFrom(request.context_id) # pylint: disable=no-member + auth_result.authenticated = True + return auth_result + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: + try: + context_client = ContextClient() + slice_ = context_client.GetSlice(request.slice_id) + return slice_.slice_id + except grpc.RpcError: + #LOGGER.exception('Unable to get slice({:s})'.format(grpc_message_to_json_string(request.slice_id))) + return SliceId() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def OrderSliceFromCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: + raise NotImplementedError('OrderSliceFromCatalog') + #return Slice() + + @safe_and_metered_rpc_method(METRICS, LOGGER) + def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: + context_client = ContextClient() + slice_client = SliceClient() + reply = slice_client.CreateSlice(request) + if reply != request.slice_id: # pylint: disable=no-member + raise Exception('Slice creation failed. 
Wrong Slice Id was returned') + return context_client.GetSlice(request.slice_id) diff --git a/src/interdomain/service/topology_abstractor/AbstractDevice.py b/src/interdomain/service/topology_abstractor/AbstractDevice.py new file mode 100644 index 0000000000000000000000000000000000000000..3448c1036d4ef086d679d5f4308ae95decfbffa7 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/AbstractDevice.py @@ -0,0 +1,190 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, logging +from typing import Dict, Optional +from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ( + ContextId, Device, DeviceDriverEnum, DeviceId, DeviceOperationalStatusEnum, EndPoint) +from common.tools.context_queries.CheckType import ( + device_type_is_datacenter, device_type_is_network, endpoint_type_is_border) +from common.tools.context_queries.Device import add_device_to_topology, get_existing_device_uuids +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device, json_device_id +from context.client.ContextClient import ContextClient + +LOGGER = logging.getLogger(__name__) + +class AbstractDevice: + def __init__(self, device_uuid : str, device_type : DeviceTypeEnum): + self.__context_client = ContextClient() + self.__device_uuid : str = device_uuid + self.__device_type : DeviceTypeEnum = device_type + self.__device : Optional[Device] = None + self.__device_id : Optional[DeviceId] = None + + # Dict[device_uuid, Dict[endpoint_uuid, abstract EndPoint]] + self.__device_endpoint_to_abstract : Dict[str, Dict[str, EndPoint]] = dict() + + # Dict[endpoint_uuid, device_uuid] + self.__abstract_endpoint_to_device : Dict[str, str] = dict() + + @property + def uuid(self) -> str: return self.__device_uuid + + @property + def device_id(self) -> Optional[DeviceId]: return self.__device_id + + @property + def device(self) -> Optional[Device]: return self.__device + + def get_endpoint(self, device_uuid : str, endpoint_uuid : str) -> Optional[EndPoint]: + return self.__device_endpoint_to_abstract.get(device_uuid, {}).get(endpoint_uuid) + + def initialize(self) -> bool: + if self.__device is not None: return False + + existing_device_uuids = get_existing_device_uuids(self.__context_client) + create_abstract_device = self.__device_uuid not in existing_device_uuids + + if create_abstract_device: + self._create_empty() + else: + self._load_existing() + + is_datacenter = device_type_is_datacenter(self.__device_type) + is_network = device_type_is_network(self.__device_type) + if is_datacenter or is_network: + # Add abstract device to topologies [INTERDOMAIN_TOPOLOGY_UUID] + context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) + topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID] + for topology_uuid in topology_uuids: + add_device_to_topology(self.__context_client, context_id, 
topology_uuid, self.__device_uuid) + + # seems not needed; to be removed in future releases + #if is_datacenter and create_abstract_device: + # dc_device = self.__context_client.GetDevice(DeviceId(**json_device_id(self.__device_uuid))) + # if device_type_is_datacenter(dc_device.device_type): + # self.update_endpoints(dc_device) + #elif is_network: + # devices_in_admin_topology = get_devices_in_topology( + # self.__context_client, context_id, DEFAULT_TOPOLOGY_UUID) + # for device in devices_in_admin_topology: + # if device_type_is_datacenter(device.device_type): continue + # self.update_endpoints(device) + + return True + + def _create_empty(self) -> None: + device_uuid = self.__device_uuid + + device = Device(**json_device( + device_uuid, self.__device_type.value, DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED, + endpoints=[], config_rules=[], drivers=[DeviceDriverEnum.DEVICEDRIVER_UNDEFINED] + )) + self.__context_client.SetDevice(device) + self.__device = device + self.__device_id = self.__device.device_id + + def _load_existing(self) -> None: + self.__device_endpoint_to_abstract = dict() + self.__abstract_endpoint_to_device = dict() + + self.__device_id = DeviceId(**json_device_id(self.__device_uuid)) + self.__device = self.__context_client.GetDevice(self.__device_id) + self.__device_type = self.__device.device_type + device_uuid = self.__device_id.device_uuid.uuid + + device_type = self.__device_type + is_datacenter = device_type_is_datacenter(device_type) + is_network = device_type_is_network(device_type) + if not is_datacenter and not is_network: + LOGGER.warning('Unsupported InterDomain Device Type: {:s}'.format(str(device_type))) + return + + # for each endpoint in abstract device, populate internal data structures and mappings + for interdomain_endpoint in self.__device.device_endpoints: + endpoint_uuid : str = interdomain_endpoint.endpoint_id.endpoint_uuid.uuid + + if is_network: + endpoint_uuid,device_uuid = endpoint_uuid.split('@', maxsplit=1) + + self.__device_endpoint_to_abstract\ + .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint) + self.__abstract_endpoint_to_device\ + .setdefault(endpoint_uuid, device_uuid) + + def _update_endpoint_type(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> bool: + device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {}) + interdomain_endpoint = device_endpoint_to_abstract.get(endpoint_uuid) + interdomain_endpoint_type = interdomain_endpoint.endpoint_type + if endpoint_type == interdomain_endpoint_type: return False + interdomain_endpoint.endpoint_type = endpoint_type + return True + + def _add_endpoint(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> EndPoint: + interdomain_endpoint = self.__device.device_endpoints.add() + interdomain_endpoint.endpoint_id.device_id.CopyFrom(self.__device_id) + interdomain_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid + interdomain_endpoint.endpoint_type = endpoint_type + + self.__device_endpoint_to_abstract\ + .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint) + self.__abstract_endpoint_to_device\ + .setdefault(endpoint_uuid, device_uuid) + + return interdomain_endpoint + + def _remove_endpoint( + self, device_uuid : str, endpoint_uuid : str, interdomain_endpoint : EndPoint + ) -> None: + self.__abstract_endpoint_to_device.pop(endpoint_uuid, None) + device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {}) + 
device_endpoint_to_abstract.pop(endpoint_uuid, None)
+        self.__device.device_endpoints.remove(interdomain_endpoint)
+
+    def update_endpoints(self, device : Device) -> bool:
+        if device_type_is_datacenter(self.__device.device_type): return False
+
+        device_uuid = device.device_id.device_uuid.uuid
+        device_border_endpoint_uuids = {
+            endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type
+            for endpoint in device.device_endpoints
+            if endpoint_type_is_border(endpoint.endpoint_type)
+        }
+
+        updated = False
+
+        # for each border endpoint in abstract device that is not in device; remove from abstract device
+        device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {})
+        _device_endpoint_to_abstract = copy.deepcopy(device_endpoint_to_abstract)
+        for endpoint_uuid, interdomain_endpoint in _device_endpoint_to_abstract.items():
+            if endpoint_uuid in device_border_endpoint_uuids: continue
+            # remove interdomain endpoint that is not in device
+            self._remove_endpoint(device_uuid, endpoint_uuid, interdomain_endpoint)
+            updated = True
+
+        # for each border endpoint in device that is not in abstract device; add to abstract device
+        for endpoint_uuid,endpoint_type in device_border_endpoint_uuids.items():
+            # if already added; just check endpoint type is not modified
+            if endpoint_uuid in self.__abstract_endpoint_to_device:
+                # evaluate the update first: 'updated or ...' would short-circuit and
+                # skip the endpoint-type refresh once updated is already True
+                if self._update_endpoint_type(device_uuid, endpoint_uuid, endpoint_type): updated = True
+                continue
+
+            # otherwise, add it to the abstract device
+            self._add_endpoint(device_uuid, endpoint_uuid, endpoint_type)
+            updated = True
+
+        return updated
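For reference, a minimal usage sketch of the new AbstractDevice class (not part of the patch; it assumes a reachable Context service, and 'D1' is a hypothetical domain uuid):

    from common.DeviceTypes import DeviceTypeEnum
    from interdomain.service.topology_abstractor.AbstractDevice import AbstractDevice

    # create (or load) the abstract device 'D1' in the Context component and
    # register it in the inter-domain topology
    abstract_device = AbstractDevice('D1', DeviceTypeEnum.NETWORK)
    abstract_device.initialize()

    # afterwards, resync it whenever a real device changes; update_endpoints()
    # returns True when the abstract border endpoints were actually modified
    # (real_device is a context_pb2.Device retrieved from the Context component)
    changed = abstract_device.update_endpoints(real_device)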
diff --git a/src/interdomain/service/topology_abstractor/AbstractLink.py b/src/interdomain/service/topology_abstractor/AbstractLink.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fe7b07b0708ebf8490cf4304646037973b05d56
--- /dev/null
+++ b/src/interdomain/service/topology_abstractor/AbstractLink.py
@@ -0,0 +1,126 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, logging
+from typing import Dict, List, Optional, Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID
+from common.proto.context_pb2 import ContextId, EndPointId, Link, LinkId
+from common.tools.context_queries.Link import add_link_to_topology, get_existing_link_uuids
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Link import json_link, json_link_id
+from context.client.ContextClient import ContextClient
+
+LOGGER = logging.getLogger(__name__)
+
+class AbstractLink:
+    def __init__(self, link_uuid : str):
+        self.__context_client = ContextClient()
+        self.__link_uuid : str = link_uuid
+        self.__link : Optional[Link] = None
+        self.__link_id : Optional[LinkId] = None
+
+        # Dict[(device_uuid, endpoint_uuid), abstract EndPointId]
+        self.__device_endpoint_to_abstract : Dict[Tuple[str, str], EndPointId] = dict()
+
+    @property
+    def uuid(self) -> str: return self.__link_uuid
+
+    @property
+    def link_id(self) -> Optional[LinkId]: return self.__link_id
+
+    @property
+    def link(self) -> Optional[Link]: return self.__link
+
+    @staticmethod
+    def compose_uuid(
+        device_uuid_a : str, endpoint_uuid_a : str, device_uuid_z : str, endpoint_uuid_z : str
+    ) -> str:
+        # sort endpoints lexicographically to prevent duplicates
+        link_endpoint_uuids = sorted([
+            (device_uuid_a, endpoint_uuid_a),
+            (device_uuid_z, endpoint_uuid_z)
+        ])
+        link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(
+            link_endpoint_uuids[0][0], link_endpoint_uuids[0][1],
+            link_endpoint_uuids[1][0], link_endpoint_uuids[1][1])
+        return link_uuid
+
+    def initialize(self) -> bool:
+        if self.__link is not None: return False
+
+        existing_link_uuids = get_existing_link_uuids(self.__context_client)
+
+        create = self.__link_uuid not in existing_link_uuids
+        if create:
+            self._create_empty()
+        else:
+            self._load_existing()
+
+        # Add abstract link to topologies [INTERDOMAIN_TOPOLOGY_UUID]
+        context_id = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))
+        topology_uuids = [INTERDOMAIN_TOPOLOGY_UUID]
+        for topology_uuid in topology_uuids:
+            add_link_to_topology(self.__context_client, context_id, topology_uuid, self.__link_uuid)
+
+        return create
+
+    def _create_empty(self) -> None:
+        link = Link(**json_link(self.__link_uuid, endpoint_ids=[]))
+        self.__context_client.SetLink(link)
+        self.__link = link
+        self.__link_id = self.__link.link_id
+
+    def _load_existing(self) -> None:
+        self.__link_id = LinkId(**json_link_id(self.__link_uuid))
+        self.__link = self.__context_client.GetLink(self.__link_id)
+
+        self.__device_endpoint_to_abstract = dict()
+
+        # for each endpoint in abstract link, populate internal data structures and mappings
+        for endpoint_id in self.__link.link_endpoint_ids:
+            device_uuid : str = endpoint_id.device_id.device_uuid.uuid
+            endpoint_uuid : str = endpoint_id.endpoint_uuid.uuid
+            self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id)
+
+    def _add_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
+        endpoint_id = self.__link.link_endpoint_ids.add()
+        endpoint_id.device_id.device_uuid.uuid = device_uuid
+        endpoint_id.endpoint_uuid.uuid = endpoint_uuid
+        self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id)
+
+    def _remove_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None:
+        # the map is keyed by (device_uuid, endpoint_uuid) tuples; pop the tuple key directly
+        endpoint_id = self.__device_endpoint_to_abstract.pop((device_uuid, endpoint_uuid), None)
+        if endpoint_id is not None:
self.__link.link_endpoint_ids.remove(endpoint_id) + + def update_endpoints(self, link_endpoint_uuids : List[Tuple[str, str]] = []) -> bool: + updated = False + + # for each endpoint in abstract link that is not in link; remove from abstract link + device_endpoint_to_abstract = copy.deepcopy(self.__device_endpoint_to_abstract) + for device_uuid, endpoint_uuid in device_endpoint_to_abstract.keys(): + if (device_uuid, endpoint_uuid) in link_endpoint_uuids: continue + # remove endpoint_id that is not in link + self._remove_endpoint(device_uuid, endpoint_uuid) + updated = True + + # for each endpoint in link that is not in abstract link; add to abstract link + for device_uuid, endpoint_uuid in link_endpoint_uuids: + # if already added; just check endpoint type is not modified + if (device_uuid, endpoint_uuid) in self.__device_endpoint_to_abstract: continue + # otherwise, add it to the abstract device + self._add_endpoint(device_uuid, endpoint_uuid) + updated = True + + return updated diff --git a/src/interdomain/service/topology_abstractor/DltRecordSender.py b/src/interdomain/service/topology_abstractor/DltRecordSender.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e3d81dded18c7406b54389cbe128c0fd27d7b4 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/DltRecordSender.py @@ -0,0 +1,91 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
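+
+# DltRecordSender batches updated Context entities (devices, links, services and
+# slices) keyed by a per-topology record uuid; commit() then persists each entity
+# through the Context client and notifies the DLT connector so that remote domains
+# receive the update.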
+ +import logging +from typing import Dict, List, Tuple +from common.proto.context_pb2 import Device, Link, Service, Slice, TopologyId +from common.proto.dlt_connector_pb2 import DltDeviceId, DltLinkId, DltServiceId, DltSliceId +from context.client.ContextClient import ContextClient +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from .Types import DltRecordTypes + +LOGGER = logging.getLogger(__name__) + +class DltRecordSender: + def __init__(self, context_client : ContextClient, dlt_connector_client : DltConnectorClient) -> None: + self.context_client = context_client + self.dlt_connector_client = dlt_connector_client + self.dlt_record_uuids : List[str] = list() + self.dlt_record_uuid_to_data : Dict[str, Tuple[TopologyId, DltRecordTypes]] = dict() + + def _add_record(self, record_uuid : str, data : Tuple[TopologyId, DltRecordTypes]) -> None: + if record_uuid in self.dlt_record_uuid_to_data: return + self.dlt_record_uuid_to_data[record_uuid] = data + self.dlt_record_uuids.append(record_uuid) + + def add_device(self, topology_id : TopologyId, device : Device) -> None: + topology_uuid = topology_id.topology_uuid.uuid + device_uuid = device.device_id.device_uuid.uuid + record_uuid = '{:s}:device:{:s}'.format(topology_uuid, device_uuid) + self._add_record(record_uuid, (topology_id, device)) + + def add_link(self, topology_id : TopologyId, link : Link) -> None: + topology_uuid = topology_id.topology_uuid.uuid + link_uuid = link.link_id.link_uuid.uuid + record_uuid = '{:s}:link:{:s}'.format(topology_uuid, link_uuid) + self._add_record(record_uuid, (topology_id, link)) + + def add_service(self, topology_id : TopologyId, service : Service) -> None: + topology_uuid = topology_id.topology_uuid.uuid + context_uuid = service.service_id.context_id.context_uuid.uuid + service_uuid = service.service_id.service_uuid.uuid + record_uuid = '{:s}:service:{:s}/{:s}'.format(topology_uuid, context_uuid, service_uuid) + self._add_record(record_uuid, (topology_id, service)) + + def add_slice(self, topology_id : TopologyId, slice_ : Slice) -> None: + topology_uuid = topology_id.topology_uuid.uuid + context_uuid = slice_.slice_id.context_id.context_uuid.uuid + slice_uuid = slice_.slice_id.slice_uuid.uuid + record_uuid = '{:s}:slice:{:s}/{:s}'.format(topology_uuid, context_uuid, slice_uuid) + self._add_record(record_uuid, (topology_id, slice_)) + + def commit(self) -> None: + for dlt_record_uuid in self.dlt_record_uuids: + topology_id,dlt_record = self.dlt_record_uuid_to_data[dlt_record_uuid] + if isinstance(dlt_record, Device): + device_id = self.context_client.SetDevice(dlt_record) + dlt_device_id = DltDeviceId() + dlt_device_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member + dlt_device_id.device_id.CopyFrom(device_id) # pylint: disable=no-member + self.dlt_connector_client.RecordDevice(dlt_device_id) + elif isinstance(dlt_record, Link): + link_id = self.context_client.SetLink(dlt_record) + dlt_link_id = DltLinkId() + dlt_link_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member + dlt_link_id.link_id.CopyFrom(link_id) # pylint: disable=no-member + self.dlt_connector_client.RecordLink(dlt_link_id) + elif isinstance(dlt_record, Service): + service_id = self.context_client.SetService(dlt_record) + dlt_service_id = DltServiceId() + dlt_service_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member + dlt_service_id.service_id.CopyFrom(service_id) # pylint: disable=no-member + self.dlt_connector_client.RecordService(dlt_service_id) + elif 
isinstance(dlt_record, Slice): + slice_id = self.context_client.SetSlice(dlt_record) + dlt_slice_id = DltSliceId() + dlt_slice_id.topology_id.CopyFrom(topology_id) # pylint: disable=no-member + dlt_slice_id.slice_id.CopyFrom(slice_id) # pylint: disable=no-member + self.dlt_connector_client.RecordSlice(dlt_slice_id) + else: + LOGGER.error('Unsupported Record({:s})'.format(str(dlt_record))) diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py new file mode 100644 index 0000000000000000000000000000000000000000..5729fe733c3a9a8f73f188b40338160ab286998b --- /dev/null +++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py @@ -0,0 +1,288 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, threading +from typing import Dict, Optional, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ( + ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId, + TopologyEvent) +from common.tools.context_queries.CheckType import ( + device_type_is_datacenter, device_type_is_network, endpoint_type_is_border) +from common.tools.context_queries.Context import create_context +from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology +from common.tools.context_queries.Link import get_links_in_topology +from common.tools.context_queries.Topology import create_missing_topologies +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from dlt.connector.client.DltConnectorClient import DltConnectorClient +from .AbstractDevice import AbstractDevice +from .AbstractLink import AbstractLink +from .DltRecordSender import DltRecordSender +from .Types import EventTypes + +LOGGER = logging.getLogger(__name__) + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) +INTERDOMAIN_TOPOLOGY_ID = TopologyId(**json_topology_id(INTERDOMAIN_TOPOLOGY_UUID, context_id=ADMIN_CONTEXT_ID)) + +class TopologyAbstractor(threading.Thread): + def __init__(self) -> None: + super().__init__(daemon=True) + self.terminate = threading.Event() + + self.context_client = ContextClient() + self.dlt_connector_client = DltConnectorClient() + self.context_event_collector = EventsCollector(self.context_client) + + self.real_to_abstract_device_uuid : Dict[str, str] = dict() + self.real_to_abstract_link_uuid : Dict[str, str] = dict() + + self.abstract_device_to_topology_id : Dict[str, TopologyId] = dict() 
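+        # Dict[abstract_link_uuid, TopologyId of the abstract topology containing it]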
+ self.abstract_link_to_topology_id : Dict[str, TopologyId] = dict() + + self.abstract_devices : Dict[str, AbstractDevice] = dict() + self.abstract_links : Dict[Tuple[str,str], AbstractLink] = dict() + + def stop(self): + self.terminate.set() + + def run(self) -> None: + self.context_client.connect() + create_context(self.context_client, DEFAULT_CONTEXT_UUID) + topology_uuids = [DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID] + create_missing_topologies(self.context_client, ADMIN_CONTEXT_ID, topology_uuids) + + self.dlt_connector_client.connect() + self.context_event_collector.start() + + while not self.terminate.is_set(): + event = self.context_event_collector.get_event(timeout=0.1) + if event is None: continue + #if self.ignore_event(event): continue + LOGGER.info('Processing Event({:s})...'.format(grpc_message_to_json_string(event))) + self.update_abstraction(event) + + self.context_event_collector.stop() + self.context_client.close() + self.dlt_connector_client.close() + + #def ignore_event(self, event : EventTypes) -> List[DltRecordIdTypes]: + # # TODO: filter events resulting from abstraction computation + # # TODO: filter events resulting from updating remote abstractions + # if self.own_context_id is None: return False + # own_context_uuid = self.own_context_id.context_uuid.uuid + # + # if isinstance(event, ContextEvent): + # context_uuid = event.context_id.context_uuid.uuid + # return context_uuid == own_context_uuid + # elif isinstance(event, TopologyEvent): + # context_uuid = event.topology_id.context_id.context_uuid.uuid + # if context_uuid != own_context_uuid: return True + # topology_uuid = event.topology_id.topology_uuid.uuid + # if topology_uuid in {INTERDOMAIN_TOPOLOGY_UUID}: return True + # + # return False + + def _get_or_create_abstract_device( + self, device_uuid : str, device_type : DeviceTypeEnum, dlt_record_sender : DltRecordSender, + abstract_topology_id : TopologyId + ) -> AbstractDevice: + abstract_device = self.abstract_devices.get(device_uuid) + changed = False + if abstract_device is None: + abstract_device = AbstractDevice(device_uuid, device_type) + changed = abstract_device.initialize() + if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device) + self.abstract_devices[device_uuid] = abstract_device + self.abstract_device_to_topology_id[device_uuid] = abstract_topology_id + return abstract_device + + def _update_abstract_device( + self, device : Device, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId, + abstract_device_uuid : Optional[str] = None + ) -> None: + device_uuid = device.device_id.device_uuid.uuid + if device_type_is_datacenter(device.device_type): + abstract_device_uuid = device_uuid + abstract_device = self._get_or_create_abstract_device( + device_uuid, DeviceTypeEnum.EMULATED_DATACENTER, dlt_record_sender, abstract_topology_id) + elif device_type_is_network(device.device_type): + LOGGER.warning('device_type is network; not implemented') + return + else: + abstract_device = self._get_or_create_abstract_device( + abstract_device_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) + self.real_to_abstract_device_uuid[device_uuid] = abstract_device_uuid + changed = abstract_device.update_endpoints(device) + if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device) + + def _get_or_create_abstract_link( + self, link_uuid : str, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId + ) -> AbstractLink: + abstract_link = 
self.abstract_links.get(link_uuid) + changed = False + if abstract_link is None: + abstract_link = AbstractLink(link_uuid) + changed = abstract_link.initialize() + if changed: dlt_record_sender.add_link(abstract_topology_id, abstract_link.link) + self.abstract_links[link_uuid] = abstract_link + self.abstract_link_to_topology_id[link_uuid] = abstract_topology_id + return abstract_link + + def _get_link_endpoint_data(self, endpoint_id : EndPointId) -> Optional[Tuple[AbstractDevice, EndPoint]]: + device_uuid : str = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid : str = endpoint_id.endpoint_uuid.uuid + abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid) + if abstract_device_uuid is None: return None + abstract_device = self.abstract_devices.get(abstract_device_uuid) + if abstract_device is None: return None + endpoint = abstract_device.get_endpoint(device_uuid, endpoint_uuid) + if endpoint is None: return None + return abstract_device, endpoint + + def _compute_abstract_link(self, link : Link) -> Optional[str]: + if len(link.link_endpoint_ids) != 2: return None + + link_endpoint_data_A = self._get_link_endpoint_data(link.link_endpoint_ids[0]) + if link_endpoint_data_A is None: return None + abstract_device_A, endpoint_A = link_endpoint_data_A + if not endpoint_type_is_border(endpoint_A.endpoint_type): return None + + link_endpoint_data_Z = self._get_link_endpoint_data(link.link_endpoint_ids[-1]) + if link_endpoint_data_Z is None: return None + abstract_device_Z, endpoint_Z = link_endpoint_data_Z + if not endpoint_type_is_border(endpoint_Z.endpoint_type): return None + + link_uuid = AbstractLink.compose_uuid( + abstract_device_A.uuid, endpoint_A.endpoint_id.endpoint_uuid.uuid, + abstract_device_Z.uuid, endpoint_Z.endpoint_id.endpoint_uuid.uuid + ) + + # sort endpoints lexicographically to prevent duplicities + link_endpoint_uuids = sorted([ + (abstract_device_A.uuid, endpoint_A.endpoint_id.endpoint_uuid.uuid), + (abstract_device_Z.uuid, endpoint_Z.endpoint_id.endpoint_uuid.uuid) + ]) + + return link_uuid, link_endpoint_uuids + + def _update_abstract_link( + self, link : Link, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId + ) -> None: + abstract_link_specs = self._compute_abstract_link(link) + if abstract_link_specs is None: return + abstract_link_uuid, link_endpoint_uuids = abstract_link_specs + + abstract_link = self._get_or_create_abstract_link(abstract_link_uuid, dlt_record_sender, abstract_topology_id) + link_uuid = link.link_id.link_uuid.uuid + self.real_to_abstract_link_uuid[link_uuid] = abstract_link_uuid + changed = abstract_link.update_endpoints(link_endpoint_uuids) + if changed: dlt_record_sender.add_link(abstract_topology_id, abstract_link.link) + + def _infer_abstract_links(self, device : Device, dlt_record_sender : DltRecordSender) -> None: + device_uuid = device.device_id.device_uuid.uuid + + interdomain_device_uuids = get_uuids_of_devices_in_topology( + self.context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + + for endpoint in device.device_endpoints: + if not endpoint_type_is_border(endpoint.endpoint_type): continue + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + + abstract_link_uuid = AbstractLink.compose_uuid(device_uuid, endpoint_uuid, endpoint_uuid, device_uuid) + if abstract_link_uuid in self.abstract_links: continue + + if endpoint_uuid not in interdomain_device_uuids: continue + remote_device = self.context_client.GetDevice(DeviceId(**json_device_id(endpoint_uuid))) + 
remote_device_border_endpoint_uuids = { + endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type + for endpoint in remote_device.device_endpoints + if endpoint_type_is_border(endpoint.endpoint_type) + } + if device_uuid not in remote_device_border_endpoint_uuids: continue + + link_endpoint_uuids = sorted([(device_uuid, endpoint_uuid), (endpoint_uuid, device_uuid)]) + + abstract_link = self._get_or_create_abstract_link( + abstract_link_uuid, dlt_record_sender, INTERDOMAIN_TOPOLOGY_ID) + changed = abstract_link.update_endpoints(link_endpoint_uuids) + if changed: dlt_record_sender.add_link(INTERDOMAIN_TOPOLOGY_ID, abstract_link.link) + + def update_abstraction(self, event : EventTypes) -> None: + dlt_record_sender = DltRecordSender(self.context_client, self.dlt_connector_client) + + if isinstance(event, ContextEvent): + LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + + elif isinstance(event, TopologyEvent): + topology_id = event.topology_id + topology_uuid = topology_id.topology_uuid.uuid + context_id = topology_id.context_id + context_uuid = context_id.context_uuid.uuid + topology_uuids = {DEFAULT_TOPOLOGY_UUID, INTERDOMAIN_TOPOLOGY_UUID} + if (context_uuid == DEFAULT_CONTEXT_UUID) and (topology_uuid not in topology_uuids): + abstract_topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=ADMIN_CONTEXT_ID)) + self._get_or_create_abstract_device( + topology_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) + + devices = get_devices_in_topology(self.context_client, context_id, topology_uuid) + for device in devices: + self._update_abstract_device( + device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=topology_uuid) + + links = get_links_in_topology(self.context_client, context_id, topology_uuid) + for link in links: + self._update_abstract_link(link, dlt_record_sender, abstract_topology_id) + + for device in devices: + self._infer_abstract_links(device, dlt_record_sender) + + else: + LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + + elif isinstance(event, DeviceEvent): + device_id = event.device_id + device_uuid = device_id.device_uuid.uuid + abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid) + device = self.context_client.GetDevice(device_id) + if abstract_device_uuid is None: + LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + else: + abstract_topology_id = self.abstract_device_to_topology_id[abstract_device_uuid] + self._update_abstract_device( + device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=abstract_device_uuid) + + self._infer_abstract_links(device, dlt_record_sender) + + elif isinstance(event, LinkEvent): + link_id = event.link_id + link_uuid = link_id.link_uuid.uuid + abstract_link_uuid = self.real_to_abstract_link_uuid.get(link_uuid) + if abstract_link_uuid is None: + LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + else: + abstract_topology_id = self.abstract_link_to_topology_id[abstract_link_uuid] + link = self.context_client.GetLink(link_id) + self._update_abstract_link(link, dlt_record_sender, abstract_topology_id) + + else: + LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event))) + + dlt_record_sender.commit() diff --git a/src/interdomain/service/topology_abstractor/Types.py b/src/interdomain/service/topology_abstractor/Types.py new file mode 100644 index 
0000000000000000000000000000000000000000..f6a0fa7a1d7a564045b6e850c2b46cf313da52b7 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/Types.py @@ -0,0 +1,25 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union +from common.proto.context_pb2 import ( + ConnectionEvent, ContextEvent, Device, DeviceEvent, DeviceId, Link, LinkEvent, LinkId, Service, ServiceEvent, + ServiceId, Slice, SliceEvent, SliceId, TopologyEvent) + +DltRecordIdTypes = Union[DeviceId, LinkId, SliceId, ServiceId] +DltRecordTypes = Union[Device, Link, Slice, Service] + +EventTypes = Union[ + ContextEvent, TopologyEvent, DeviceEvent, LinkEvent, ServiceEvent, SliceEvent, ConnectionEvent +] diff --git a/src/interdomain/service/topology_abstractor/__init__.py b/src/interdomain/service/topology_abstractor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/interdomain/service/topology_abstractor/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
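+
+# The topology_abstractor package subscribes to Context events and maintains the
+# abstract per-domain view (AbstractDevice / AbstractLink) that is shared with
+# remote domains through the DLT. As an illustration, AbstractLink.compose_uuid()
+# yields order-independent link identifiers:
+#   compose_uuid('D2', '2/1', 'D1', '2/2') -> 'D1/2/2==D2/2/1'
+#   compose_uuid('D1', '2/2', 'D2', '2/1') -> 'D1/2/2==D2/2/1'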
+ diff --git a/src/monitoring/client/MonitoringClient.py b/src/monitoring/client/MonitoringClient.py index 73607a081cd57e7c62b9c4e2c5e487868e72d189..5641b9cf3236c5fecfa5c6efe3a03b899c342ea5 100644 --- a/src/monitoring/client/MonitoringClient.py +++ b/src/monitoring/client/MonitoringClient.py @@ -22,7 +22,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from common.proto.context_pb2 import Empty from common.proto.monitoring_pb2 import Kpi, KpiDescriptor, KpiId, MonitorKpiRequest, \ KpiDescriptorList, KpiQuery, KpiList, SubsDescriptor, SubscriptionID, SubsList, \ - SubsResponse, AlarmDescriptor, AlarmID, AlarmList, AlarmResponse, AlarmSubscription + SubsResponse, AlarmDescriptor, AlarmID, AlarmList, AlarmResponse, AlarmSubscription, RawKpiTable from common.proto.monitoring_pb2_grpc import MonitoringServiceStub LOGGER = logging.getLogger(__name__) @@ -93,7 +93,7 @@ class MonitoringClient: return response @RETRY_DECORATOR - def QueryKpiData(self, request : KpiQuery) -> KpiList: + def QueryKpiData(self, request : KpiQuery) -> RawKpiTable: LOGGER.debug('QueryKpiData: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.QueryKpiData(request) LOGGER.debug('QueryKpiData result: {:s}'.format(grpc_message_to_json_string(response))) diff --git a/src/monitoring/service/AlarmManager.py b/src/monitoring/service/AlarmManager.py index e5ac8915c3728c7894dc70ab901215dd5a7feb41..873a65d2c8041e6378f84d979bb1fd98d4d61d6b 100644 --- a/src/monitoring/service/AlarmManager.py +++ b/src/monitoring/service/AlarmManager.py @@ -1,3 +1,4 @@ +import pytz from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.executors.pool import ProcessPoolExecutor from apscheduler.jobstores.base import JobLookupError @@ -19,10 +20,16 @@ class AlarmManager(): end_date=None if subscription_timeout_s: start_timestamp=time.time() - start_date=datetime.fromtimestamp(start_timestamp) - end_date=datetime.fromtimestamp(start_timestamp+subscription_timeout_s) - self.scheduler.add_job(self.metrics_db.get_alarm_data, args=(alarm_queue,kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms),trigger='interval', seconds=(subscription_frequency_ms/1000), start_date=start_date, end_date=end_date, id=alarm_id) + end_timestamp = start_timestamp + subscription_timeout_s + start_date = datetime.utcfromtimestamp(start_timestamp).isoformat() + end_date = datetime.utcfromtimestamp(end_timestamp).isoformat() + + job = self.scheduler.add_job(self.metrics_db.get_alarm_data, + args=(alarm_queue,kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms), + trigger='interval', seconds=(subscription_frequency_ms/1000), start_date=start_date, + end_date=end_date,timezone=pytz.utc, id=str(alarm_id)) LOGGER.debug(f"Alarm job {alarm_id} succesfully created") + #job.remove() def delete_alarm(self, alarm_id): try: diff --git a/src/monitoring/service/ManagementDBTools.py b/src/monitoring/service/ManagementDBTools.py index 2387ddde0ab9eecea6c8fc982ba97a94f1a88c98..2185a3986532ad1b8e629cdcdb66079f23995c8f 100644 --- a/src/monitoring/service/ManagementDBTools.py +++ b/src/monitoring/service/ManagementDBTools.py @@ -38,7 +38,10 @@ class ManagementDB(): kpi_sample_type INTEGER, device_id INTEGER, endpoint_id INTEGER, - service_id INTEGER + service_id INTEGER, + slice_id INTEGER, + connection_id INTEGER, + monitor_flag INTEGER ); """) LOGGER.debug("KPI table created in the ManagementDB") @@ -84,13 +87,13 @@ class 
ManagementDB():
             LOGGER.debug(f"Alarm table cannot be created in the ManagementDB. {e}")
             raise Exception
 
-    def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id):
+    def insert_KPI(self,kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id):
         try:
             c = self.client.cursor()
-            c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id))
+            c.execute("SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ? AND slice_id is ? AND connection_id is ?",(device_id,kpi_sample_type,endpoint_id,service_id,slice_id,connection_id))
             data=c.fetchone()
             if data is None:
-                c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id) VALUES (?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id))
+                c.execute("INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id) VALUES (?,?,?,?,?,?,?)", (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id,slice_id,connection_id))
                 self.client.commit()
                 kpi_id = c.lastrowid
                 LOGGER.debug(f"KPI {kpi_id} succesfully inserted in the ManagementDB")
@@ -245,4 +248,41 @@ class ManagementDB():
             LOGGER.debug(f"Alarms succesfully retrieved from the ManagementDB")
             return data
         except sqlite3.Error as e:
-            LOGGER.debug(f"Alarms cannot be retrieved from the ManagementDB: {e}")
\ No newline at end of file
+            LOGGER.debug(f"Alarms cannot be retrieved from the ManagementDB: {e}")
+
+    def check_monitoring_flag(self,kpi_id):
+        try:
+            c = self.client.cursor()
+            c.execute("SELECT monitor_flag FROM kpi WHERE kpi_id is ?",(kpi_id,))
+            data=c.fetchone()
+            if data is None:
+                LOGGER.debug(f"KPI {kpi_id} does not exist")
+                return None
+            else:
+                if data[0] == 1:
+                    return True
+                elif data[0] == 0:
+                    return False
+                else:
+                    LOGGER.debug(f"KPI {kpi_id} has an invalid monitor_flag value")
+                    return None
+        except sqlite3.Error as e:
+            LOGGER.debug(f"KPI {kpi_id} cannot be checked in the ManagementDB: {e}")
+
+
+    def set_monitoring_flag(self,kpi_id,flag):
+        try:
+            c = self.client.cursor()
+            data = c.execute("SELECT * FROM kpi WHERE kpi_id is ?",(kpi_id,)).fetchone()
+            if data is None:
+                LOGGER.debug(f"KPI {kpi_id} does not exist")
+                return None
+            else:
+                if flag:
+                    value = 1
+                else:
+                    value = 0
+                c.execute("UPDATE kpi SET monitor_flag = ? WHERE kpi_id is ?",(value,kpi_id))
+                return True
+        except sqlite3.Error as e:
+            LOGGER.debug(f"monitor_flag of KPI {kpi_id} cannot be updated in the ManagementDB: {e}")
\ No newline at end of file
diff --git a/src/monitoring/service/MetricsDBTools.py b/src/monitoring/service/MetricsDBTools.py
index 16e6373f542656b4c172c8d619bf3f17ca5df404..1d3888d5348bdbe2995f077310ca448827290382 100644
--- a/src/monitoring/service/MetricsDBTools.py
+++ b/src/monitoring/service/MetricsDBTools.py
@@ -87,6 +87,8 @@ class MetricsDB():
                 'device_id SYMBOL,' \
                 'endpoint_id SYMBOL,' \
                 'service_id SYMBOL,' \
+                'slice_id SYMBOL,' \
+                'connection_id SYMBOL,' \
                 'timestamp TIMESTAMP,' \
                 'kpi_value DOUBLE)' \
                 'TIMESTAMP(timestamp);'
@@ -97,7 +99,7 @@ class MetricsDB():
             LOGGER.debug(f"Table {self.table} cannot be created.
{e}") raise Exception - def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, kpi_value): + def write_KPI(self, time, kpi_id, kpi_sample_type, device_id, endpoint_id, service_id, slice_id, connection_id, kpi_value): counter = 0 while (counter < self.retries): try: @@ -109,7 +111,9 @@ class MetricsDB(): 'kpi_sample_type': kpi_sample_type, 'device_id': device_id, 'endpoint_id': endpoint_id, - 'service_id': service_id}, + 'service_id': service_id, + 'slice_id': slice_id, + 'connection_id': connection_id,}, columns={ 'kpi_value': kpi_value}, at=datetime.datetime.fromtimestamp(time)) @@ -170,11 +174,54 @@ class MetricsDB(): if connection: connection.close() + def get_raw_kpi_list(self, kpi_id, monitoring_window_s, last_n_samples, start_timestamp, end_timestamp): + try: + query_root = f"SELECT timestamp, kpi_value FROM {self.table} WHERE kpi_id = '{kpi_id}' " + query = query_root + start_date = float() + end_date = float() + if last_n_samples: + query = query + f"ORDER BY timestamp DESC limit {last_n_samples}" + elif monitoring_window_s or start_timestamp or end_timestamp: + if start_timestamp and end_timestamp: + start_date = start_timestamp + end_date = end_timestamp + elif monitoring_window_s: + if start_timestamp and not end_timestamp: + start_date = start_timestamp + end_date = start_date + monitoring_window_s + elif end_timestamp and not start_timestamp: + end_date = end_timestamp + start_date = end_date - monitoring_window_s + elif not start_timestamp and not end_timestamp: + end_date = timestamp_utcnow_to_float() + start_date = end_date - monitoring_window_s + query = query + f"AND (timestamp BETWEEN '{timestamp_float_to_string(start_date)}' AND '{timestamp_float_to_string(end_date)}')" + else: + LOGGER.debug(f"Wrong parameters settings") + + LOGGER.debug(query) + + if self.postgre: + kpi_list = self.run_query_postgre(query) + LOGGER.debug(f"kpi_list postgre: {kpi_list}") + else: + kpi_list = self.run_query(query) + LOGGER.debug(f"kpi_list influx: {kpi_list}") + if kpi_list: + LOGGER.debug(f"New data received for subscription to KPI {kpi_id}") + return kpi_list + else: + LOGGER.debug(f"No new data for the subscription to KPI {kpi_id}") + except (Exception) as e: + LOGGER.debug(f"Subscription data cannot be retrieved. 
{e}") + def get_subscription_data(self,subs_queue, kpi_id, sampling_interval_s=1): try: end_date = timestamp_utcnow_to_float() - self.commit_lag_ms / 1000 start_date = end_date - sampling_interval_s query = f"SELECT kpi_id, timestamp, kpi_value FROM {self.table} WHERE kpi_id = '{kpi_id}' AND (timestamp BETWEEN '{timestamp_float_to_string(start_date)}' AND '{timestamp_float_to_string(end_date)}')" + LOGGER.debug(query) if self.postgre: kpi_list = self.run_query_postgre(query) LOGGER.debug(f"kpi_list postgre: {kpi_list}") @@ -201,6 +248,8 @@ class MetricsDB(): kpi_list = self.run_query(query) if kpi_list: LOGGER.debug(f"New data received for alarm of KPI {kpi_id}") + LOGGER.info(kpi_list) + valid_kpi_list = [] for kpi in kpi_list: alarm = False kpi_value = kpi[2] @@ -263,10 +312,10 @@ class MetricsDB(): if (kpi_value >= kpiMaxValue): alarm = True if alarm: - # queue.append[kpi] - alarm_queue.put_nowait(kpi) - LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}") - else: - LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}") + valid_kpi_list.append(kpi) + alarm_queue.put_nowait(valid_kpi_list) + LOGGER.debug(f"Alarm of KPI {kpi_id} triggered -> kpi_value:{kpi[2]}, timestamp:{kpi[1]}") + else: + LOGGER.debug(f"No new data for the alarm of KPI {kpi_id}") except (Exception) as e: LOGGER.debug(f"Alarm data cannot be retrieved. {e}") \ No newline at end of file diff --git a/src/monitoring/service/MonitoringServiceServicerImpl.py b/src/monitoring/service/MonitoringServiceServicerImpl.py index 7cd47f187986a0c32eea2ac8405183ac4418d100..548f34c8a07a1d8df17f2702879dbbadf60f6d13 100644 --- a/src/monitoring/service/MonitoringServiceServicerImpl.py +++ b/src/monitoring/service/MonitoringServiceServicerImpl.py @@ -26,9 +26,9 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.proto.monitoring_pb2_grpc import MonitoringServiceServicer from common.proto.monitoring_pb2 import AlarmResponse, AlarmDescriptor, AlarmList, SubsList, KpiId, \ KpiDescriptor, KpiList, KpiQuery, SubsDescriptor, SubscriptionID, AlarmID, KpiDescriptorList, \ - MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse + MonitorKpiRequest, Kpi, AlarmSubscription, SubsResponse, RawKpiTable, RawKpi, RawKpiList from common.rpc_method_wrapper.ServiceExceptions import ServiceException -from common.tools.timestamp.Converters import timestamp_string_to_float +from common.tools.timestamp.Converters import timestamp_string_to_float, timestamp_utcnow_to_float from monitoring.service import ManagementDBTools, MetricsDBTools from device.client.DeviceClient import DeviceClient @@ -85,13 +85,16 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): kpi_device_id = request.device_id.device_uuid.uuid kpi_endpoint_id = request.endpoint_id.endpoint_uuid.uuid kpi_service_id = request.service_id.service_uuid.uuid + kpi_slice_id = request.slice_id.slice_uuid.uuid + kpi_connection_id = request.connection_id.connection_uuid.uuid - if request.kpi_id.kpi_id.uuid is not "": + + if request.kpi_id.kpi_id.uuid != "": response.kpi_id.uuid = request.kpi_id.kpi_id.uuid # Here the code to modify an existing kpi else: data = self.management_db.insert_KPI( - kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id) + kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id, kpi_slice_id, kpi_connection_id) response.kpi_id.uuid = str(data) return response @@ -131,11 +134,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): if kpi_db is 
None: LOGGER.info('GetKpiDescriptor error: KpiID({:s}): not found in database'.format(str(kpi_id))) else: - kpiDescriptor.kpi_description = kpi_db[1] - kpiDescriptor.kpi_sample_type = kpi_db[2] - kpiDescriptor.device_id.device_uuid.uuid = str(kpi_db[3]) - kpiDescriptor.endpoint_id.endpoint_uuid.uuid = str(kpi_db[4]) - kpiDescriptor.service_id.service_uuid.uuid = str(kpi_db[5]) + kpiDescriptor.kpi_description = kpi_db[1] + kpiDescriptor.kpi_sample_type = kpi_db[2] + kpiDescriptor.device_id.device_uuid.uuid = str(kpi_db[3]) + kpiDescriptor.endpoint_id.endpoint_uuid.uuid = str(kpi_db[4]) + kpiDescriptor.service_id.service_uuid.uuid = str(kpi_db[5]) + kpiDescriptor.slice_id.slice_uuid.uuid = str(kpi_db[6]) + kpiDescriptor.connection_id.connection_uuid.uuid = str(kpi_db[7]) return kpiDescriptor except ServiceException as e: LOGGER.exception('GetKpiDescriptor exception') @@ -154,12 +159,14 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): for item in data: kpi_descriptor = KpiDescriptor() - kpi_descriptor.kpi_id.kpi_id.uuid = str(item[0]) - kpi_descriptor.kpi_description = item[1] - kpi_descriptor.kpi_sample_type = item[2] - kpi_descriptor.device_id.device_uuid.uuid = str(item[3]) - kpi_descriptor.endpoint_id.endpoint_uuid.uuid = str(item[4]) - kpi_descriptor.service_id.service_uuid.uuid = str(item[5]) + kpi_descriptor.kpi_id.kpi_id.uuid = str(item[0]) + kpi_descriptor.kpi_description = item[1] + kpi_descriptor.kpi_sample_type = item[2] + kpi_descriptor.device_id.device_uuid.uuid = str(item[3]) + kpi_descriptor.endpoint_id.endpoint_uuid.uuid = str(item[4]) + kpi_descriptor.service_id.service_uuid.uuid = str(item[5]) + kpi_descriptor.slice_id.slice_uuid.uuid = str(item[6]) + kpi_descriptor.connection_id.connection_uuid.uuid = str(item[7]) kpi_descriptor_list.kpi_descriptor_list.append(kpi_descriptor) @@ -186,11 +193,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): deviceId = kpiDescriptor.device_id.device_uuid.uuid endpointId = kpiDescriptor.endpoint_id.endpoint_uuid.uuid serviceId = kpiDescriptor.service_id.service_uuid.uuid + sliceId = kpiDescriptor.slice_id.slice_uuid.uuid + connectionId = kpiDescriptor.connection_id.connection_uuid.uuid time_stamp = request.timestamp.timestamp kpi_value = getattr(request.kpi_value, request.kpi_value.WhichOneof('value')) # Build the structure to be included as point in the MetricsDB - self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, kpi_value) + self.metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, kpi_value) return Empty() except ServiceException as e: @@ -220,8 +229,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): monitor_device_request.sampling_duration_s = request.monitoring_window_s monitor_device_request.sampling_interval_s = request.sampling_rate_s - device_client = DeviceClient() - device_client.MonitorDeviceKpi(monitor_device_request) + if not self.management_db.check_monitoring_flag(kpi_id): + device_client = DeviceClient() + device_client.MonitorDeviceKpi(monitor_device_request) + self.management_db.set_monitoring_flag(kpi_id,True) + self.management_db.check_monitoring_flag(kpi_id) + else: + LOGGER.warning('MonitorKpi warning: KpiID({:s}) is currently being monitored'.format(str(kpi_id))) else: LOGGER.info('MonitorKpi error: KpiID({:s}): not found in database'.format(str(kpi_id))) return response @@ -234,12 +248,48 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): 
grpc_context.abort(grpc.StatusCode.INTERNAL, str(e)) # CREATEKPI_COUNTER_FAILED.inc() - def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> KpiList: + def QueryKpiData(self, request: KpiQuery, grpc_context: grpc.ServicerContext) -> RawKpiTable: LOGGER.info('QueryKpiData') try: - # TBC - return KpiList() + raw_kpi_table = RawKpiTable() + + LOGGER.debug(str(request)) + + kpi_id_list = request.kpi_ids + monitoring_window_s = request.monitoring_window_s + last_n_samples = request.last_n_samples + start_timestamp = request.start_timestamp.timestamp + end_timestamp = request.end_timestamp.timestamp + + # Check if all the Kpi_ids exist + for item in kpi_id_list: + kpi_id = item.kpi_id.uuid + + kpiDescriptor = self.GetKpiDescriptor(item, grpc_context) + if kpiDescriptor is None: + LOGGER.info('QueryKpiData error: KpiID({:s}): not found in database'.format(str(kpi_id))) + break + else: + # Execute query per Kpi_id and introduce their kpi_list in the table + kpi_list = self.metrics_db.get_raw_kpi_list(kpi_id,monitoring_window_s,last_n_samples,start_timestamp,end_timestamp) + raw_kpi_list = RawKpiList() + raw_kpi_list.kpi_id.kpi_id.uuid = kpi_id + + LOGGER.debug(str(kpi_list)) + + if kpi_list is None: + LOGGER.info('QueryKpiData error: KpiID({:s}): points not found in metrics database'.format(str(kpi_id))) + else: + for item in kpi_list: + raw_kpi = RawKpi() + raw_kpi.timestamp.timestamp = timestamp_string_to_float(item[0]) + raw_kpi.kpi_value.floatVal = item[1] + raw_kpi_list.raw_kpis.append(raw_kpi) + + raw_kpi_table.raw_kpi_lists.append(raw_kpi_list) + + return raw_kpi_table except ServiceException as e: LOGGER.exception('QueryKpiData exception') grpc_context.abort(e.code, e.details) @@ -250,9 +300,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): LOGGER.info('SubscribeKpi') try: - subs_queue = Queue() - subs_response = SubsResponse() kpi_id = request.kpi_id.kpi_id.uuid sampling_duration_s = request.sampling_duration_s @@ -268,18 +316,21 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): start_timestamp, end_timestamp) # parse queue to append kpis into the list - while not subs_queue.empty(): - list = subs_queue.get_nowait() - for item in list: - kpi = Kpi() - kpi.kpi_id.kpi_id.uuid = str(item[0]) - kpi.timestamp.timestamp = timestamp_string_to_float(item[1]) - kpi.kpi_value.floatVal = item[2] # This must be improved - subs_response.kpi_list.kpi.append(kpi) - - subs_response.subs_id.subs_id.uuid = str(subs_id) - - yield subs_response + while True: + while not subs_queue.empty(): + subs_response = SubsResponse() + list = subs_queue.get_nowait() + for item in list: + kpi = Kpi() + kpi.kpi_id.kpi_id.uuid = str(item[0]) + kpi.timestamp.timestamp = timestamp_string_to_float(item[1]) + kpi.kpi_value.floatVal = item[2] # This must be improved + subs_response.kpi_list.kpi.append(kpi) + subs_response.subs_id.subs_id.uuid = str(subs_id) + yield subs_response + if timestamp_utcnow_to_float() > end_timestamp: + break + # yield subs_response except ServiceException as e: LOGGER.exception('SubscribeKpi exception') grpc_context.abort(e.code, e.details) @@ -373,7 +424,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): LOGGER.debug(f"request.AlarmID: {request.alarm_id.alarm_id.uuid}") - if request.alarm_id.alarm_id.uuid is not "": + if request.alarm_id.alarm_id.uuid != "": alarm_id = request.alarm_id.alarm_id.uuid # Here the code to modify an existing alarm else: @@ -424,6 +475,7 @@ class 
MonitoringServiceServicerImpl(MonitoringServiceServicer): LOGGER.info('GetAlarmDescriptor') try: alarm_id = request.alarm_id.uuid + LOGGER.debug(alarm_id) alarm = self.management_db.get_alarm(alarm_id) response = AlarmDescriptor() @@ -454,15 +506,13 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): LOGGER.info('GetAlarmResponseStream') try: alarm_id = request.alarm_id.alarm_id.uuid - alarm = self.management_db.get_alarm(alarm_id) - alarm_response = AlarmResponse() - - if alarm: + alarm_data = self.management_db.get_alarm(alarm_id) + real_start_time = timestamp_utcnow_to_float() + if alarm_data: + LOGGER.debug(f"{alarm_data}") alarm_queue = Queue() - alarm_data = self.management_db.get_alarm(alarm) - alarm_id = request.alarm_id.alarm_id.uuid kpi_id = alarm_data[3] kpiMinValue = alarm_data[4] @@ -473,24 +523,30 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): subscription_frequency_ms = request.subscription_frequency_ms subscription_timeout_s = request.subscription_timeout_s + end_timestamp = real_start_time + subscription_timeout_s + self.alarm_manager.create_alarm(alarm_queue, alarm_id, kpi_id, kpiMinValue, kpiMaxValue, inRange, includeMinValue, includeMaxValue, subscription_frequency_ms, subscription_timeout_s) - while not alarm_queue.empty(): - list = alarm_queue.get_nowait() - for item in list: - kpi = Kpi() - kpi.kpi_id.kpi_id.uuid = str(item[0]) - kpi.timestamp.timestamp = timestamp_string_to_float(item[1]) - kpi.kpi_value.floatVal = item[2] # This must be improved - alarm_response.kpi_list.kpi.append(kpi) - - alarm_response.alarm_id.alarm_id.uuid = alarm_id - - yield alarm_response + while True: + while not alarm_queue.empty(): + alarm_response = AlarmResponse() + list = alarm_queue.get_nowait() + size = len(list) + for item in list: + kpi = Kpi() + kpi.kpi_id.kpi_id.uuid = str(item[0]) + kpi.timestamp.timestamp = timestamp_string_to_float(item[1]) + kpi.kpi_value.floatVal = item[2] # This must be improved + alarm_response.kpi_list.kpi.append(kpi) + alarm_response.alarm_id.alarm_id.uuid = alarm_id + yield alarm_response + if timestamp_utcnow_to_float() > end_timestamp: + break else: LOGGER.info('GetAlarmResponseStream error: AlarmID({:s}): not found in database'.format(str(alarm_id))) + alarm_response = AlarmResponse() alarm_response.alarm_id.alarm_id.uuid = "NoID" return alarm_response except ServiceException as e: @@ -527,7 +583,7 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): kpi_db = self.management_db.get_KPI(int(kpi_id)) response = Kpi() if kpi_db is None: - LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id))) + LOGGER.info('GetStreamKpi error: KpiID({:s}): not found in database'.format(str(kpi_id))) response.kpi_id.kpi_id.uuid = "NoID" return response else: @@ -540,26 +596,29 @@ class MonitoringServiceServicerImpl(MonitoringServiceServicer): try: kpi_id = request.kpi_id.uuid response = Kpi() - if kpi_id is "": + if kpi_id == "": LOGGER.info('GetInstantKpi error: KpiID({:s}): not found in database'.format(str(kpi_id))) response.kpi_id.kpi_id.uuid = "NoID" else: query = f"SELECT kpi_id, timestamp, kpi_value FROM monitoring WHERE kpi_id = '{kpi_id}' " \ f"LATEST ON timestamp PARTITION BY kpi_id" - data = self.metrics_db.run_query(query)[0] + data = self.metrics_db.run_query(query) LOGGER.debug(data) - - response.kpi_id.kpi_id.uuid = str(data[0]) - response.timestamp.timestamp = timestamp_string_to_float(data[1]) - response.kpi_value.floatVal = data[2] # This must be improved + if 
len(data) == 0:
+                response.kpi_id.kpi_id.uuid = request.kpi_id.uuid
+            else:
+                _data = data[0]
+                response.kpi_id.kpi_id.uuid  = str(_data[0])
+                response.timestamp.timestamp = timestamp_string_to_float(_data[1])
+                response.kpi_value.floatVal  = _data[2]
             return response
         except ServiceException as e:
-            LOGGER.exception('SetKpi exception')
+            LOGGER.exception('GetInstantKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(e.code, e.details)
         except Exception as e:  # pragma: no cover
-            LOGGER.exception('SetKpi exception')
+            LOGGER.exception('GetInstantKpi exception')
             # CREATEKPI_COUNTER_FAILED.inc()
             grpc_context.abort(grpc.StatusCode.INTERNAL, str(e))
diff --git a/src/monitoring/service/SubscriptionManager.py b/src/monitoring/service/SubscriptionManager.py
index fe27d6ee365676b05175b762a106621121e3b897..3d1da36b7c5f66c28d3885a305660d6971f695b1 100644
--- a/src/monitoring/service/SubscriptionManager.py
+++ b/src/monitoring/service/SubscriptionManager.py
@@ -42,14 +42,12 @@ class SubscriptionManager():
         if end_timestamp:
             end_date = datetime.utcfromtimestamp(end_timestamp).isoformat()

-        LOGGER.debug(f"kpi_id: {kpi_id}")
-        LOGGER.debug(f"sampling_interval_s: {sampling_interval_s}")
-        LOGGER.debug(f"subscription_id: {subscription_id}")
-        LOGGER.debug(f"start_date: {start_date}")
-        self.scheduler.add_job(self.metrics_db.get_subscription_data, args=(subs_queue, kpi_id, sampling_interval_s),
+        job = self.scheduler.add_job(self.metrics_db.get_subscription_data, args=(subs_queue, kpi_id, sampling_interval_s),
                                trigger='interval', seconds=sampling_interval_s, start_date=start_date,
                                end_date=end_date, timezone=pytz.utc, id=str(subscription_id))
         LOGGER.debug(f"Subscription job {subscription_id} successfully created")
+        #job.remove()

     def delete_subscription(self, subscription_id):
-        self.scheduler.remove_job(subscription_id)
\ No newline at end of file
+        self.scheduler.remove_job(subscription_id)
+        LOGGER.debug(f"Subscription job {subscription_id} successfully deleted")
\ No newline at end of file
diff --git a/src/monitoring/tests/Messages.py b/src/monitoring/tests/Messages.py
index 845153856c44cec0576bd6f11b045e3310558a97..f15cb5ec2c1d14ed95731cd37e54cb714b29e8b7 100644
--- a/src/monitoring/tests/Messages.py
+++ b/src/monitoring/tests/Messages.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
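# Illustrative sketch (not part of the patch): the polling pattern behind the
# SubscriptionManager change above, assuming a local Python 3 environment with
# APScheduler installed. An interval job periodically pushes samples into a Queue and
# the consumer drains it until the subscription window closes. fetch_samples() is a
# placeholder for MetricsDB.get_subscription_data(), which runs the time-bounded SELECT.
from queue import Queue
from time import sleep, time
from apscheduler.schedulers.background import BackgroundScheduler

def fetch_samples(subs_queue):
    # stand-in for the real query; enqueue a batch of (kpi_id, timestamp, value) rows
    subs_queue.put_nowait([('1', '2022-12-01 00:00:00.0', 42.0)])

subs_queue = Queue()
scheduler  = BackgroundScheduler()
scheduler.start()
end_timestamp = time() + 10  # subscription window: 10 seconds
scheduler.add_job(fetch_samples, args=(subs_queue,), trigger='interval', seconds=3, id='subs-1')
while time() <= end_timestamp:
    while not subs_queue.empty():
        print(subs_queue.get_nowait())  # the servicer wraps each batch in a SubsResponse
    sleep(0.5)
scheduler.shutdown()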
-import datetime from random import random from common.proto import monitoring_pb2 @@ -23,13 +22,15 @@ def kpi_id(): _kpi_id.kpi_id.uuid = str(1) # pylint: disable=maybe-no-member return _kpi_id -def create_kpi_request(): - _create_kpi_request = monitoring_pb2.KpiDescriptor() - _create_kpi_request.kpi_description = 'KPI Description Test' - _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED - _create_kpi_request.device_id.device_uuid.uuid = 'DEV1' # pylint: disable=maybe-no-member - _create_kpi_request.service_id.service_uuid.uuid = 'SERV1' # pylint: disable=maybe-no-member - _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END1' # pylint: disable=maybe-no-member +def create_kpi_request(kpi_id_str): + _create_kpi_request = monitoring_pb2.KpiDescriptor() + _create_kpi_request.kpi_description = 'KPI Description Test' + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED + _create_kpi_request.device_id.device_uuid.uuid = 'DEV' + str(kpi_id_str) + _create_kpi_request.service_id.service_uuid.uuid = 'SERV' + str(kpi_id_str) + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC' + str(kpi_id_str) + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END' + str(kpi_id_str) + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON' + str(kpi_id_str) return _create_kpi_request def create_kpi_request_b(): @@ -38,7 +39,9 @@ def create_kpi_request_b(): _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED _create_kpi_request.device_id.device_uuid.uuid = 'DEV2' # pylint: disable=maybe-no-member _create_kpi_request.service_id.service_uuid.uuid = 'SERV2' # pylint: disable=maybe-no-member + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC2' # pylint: disable=maybe-no-member _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END2' # pylint: disable=maybe-no-member + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON2' # pylint: disable=maybe-no-member return _create_kpi_request def create_kpi_request_c(): @@ -47,7 +50,9 @@ def create_kpi_request_c(): _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED _create_kpi_request.device_id.device_uuid.uuid = 'DEV3' # pylint: disable=maybe-no-member _create_kpi_request.service_id.service_uuid.uuid = 'SERV3' # pylint: disable=maybe-no-member + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC3' # pylint: disable=maybe-no-member _create_kpi_request.endpoint_id.endpoint_uuid.uuid = 'END3' # pylint: disable=maybe-no-member + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON3' # pylint: disable=maybe-no-member return _create_kpi_request def monitor_kpi_request(kpi_uuid, monitoring_window_s, sampling_rate_s): @@ -69,20 +74,32 @@ def kpi_descriptor_list(): return _kpi_descriptor_list -def kpi_query(): +def kpi_query(kpi_id_list): _kpi_query = monitoring_pb2.KpiQuery() + _kpi_query.kpi_ids.extend(kpi_id_list) + # _kpi_query.monitoring_window_s = 10 + # _kpi_query.last_n_samples = 2 + _kpi_query.start_timestamp.timestamp = timestamp_utcnow_to_float() - 10 + _kpi_query.end_timestamp.timestamp = timestamp_utcnow_to_float() + return _kpi_query def subs_descriptor(kpi_id): _subs_descriptor = monitoring_pb2.SubsDescriptor() + sampling_duration_s = 10 + sampling_interval_s = 3 + real_start_time = timestamp_utcnow_to_float() + start_timestamp = real_start_time + end_timestamp = start_timestamp + sampling_duration_s + _subs_descriptor.subs_id.subs_id.uuid = "" _subs_descriptor.kpi_id.kpi_id.uuid = kpi_id.kpi_id.uuid - 
_subs_descriptor.sampling_duration_s = 10 - _subs_descriptor.sampling_interval_s = 2 - _subs_descriptor.start_timestamp.timestamp = timestamp_utcnow_to_float() - _subs_descriptor.end_timestamp.timestamp = timestamp_utcnow_to_float() + 10 + _subs_descriptor.sampling_duration_s = sampling_duration_s + _subs_descriptor.sampling_interval_s = sampling_interval_s + _subs_descriptor.start_timestamp.timestamp = start_timestamp + _subs_descriptor.end_timestamp.timestamp = end_timestamp return _subs_descriptor @@ -91,14 +108,14 @@ def subs_id(): return _subs_id -def alarm_descriptor(): +def alarm_descriptor(kpi_id): _alarm_descriptor = monitoring_pb2.AlarmDescriptor() _alarm_descriptor.alarm_description = "Alarm Description" _alarm_descriptor.name = "Alarm Name" - _alarm_descriptor.kpi_id.kpi_id.uuid = "1" + _alarm_descriptor.kpi_id.kpi_id.uuid = kpi_id.kpi_id.uuid _alarm_descriptor.kpi_value_range.kpiMinValue.floatVal = 0.0 - _alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = 50.0 + _alarm_descriptor.kpi_value_range.kpiMaxValue.floatVal = 250.0 _alarm_descriptor.kpi_value_range.inRange = True _alarm_descriptor.kpi_value_range.includeMinValue = False _alarm_descriptor.kpi_value_range.includeMaxValue = True @@ -113,11 +130,16 @@ def alarm_descriptor_b(): return _alarm_descriptor def alarm_subscription(alarm_id): - _alarm_descriptor = monitoring_pb2.AlarmSubscription() + _alarm_subscription = monitoring_pb2.AlarmSubscription() - _alarm_descriptor.alarm_id.alarm_id.uuid = str(alarm_id) + subscription_timeout_s = 10 + subscription_frequency_ms = 1000 - return _alarm_descriptor + _alarm_subscription.alarm_id.alarm_id.uuid = str(alarm_id.alarm_id.uuid) + _alarm_subscription.subscription_timeout_s = subscription_timeout_s + _alarm_subscription.subscription_frequency_ms = subscription_frequency_ms + + return _alarm_subscription def alarm_id(): diff --git a/src/monitoring/tests/test_unitary.py b/src/monitoring/tests/test_unitary.py index ee6a29e8a483fe53c58a6e6d2e3aa240f2456b81..b113f5a7822841e17274300dc7102664bce1c409 100644 --- a/src/monitoring/tests/test_unitary.py +++ b/src/monitoring/tests/test_unitary.py @@ -15,11 +15,14 @@ import copy, os, pytest import threading import time +from queue import Queue +from random import random from time import sleep from typing import Tuple from apscheduler.executors.pool import ProcessPoolExecutor from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.schedulers.base import STATE_STOPPED from grpc._channel import _MultiThreadedRendezvous from common.Constants import ServiceNameEnum @@ -33,7 +36,8 @@ from common.message_broker.MessageBroker import MessageBroker from common.proto import monitoring_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.proto.monitoring_pb2 import KpiId, KpiDescriptor, KpiList, SubsDescriptor, SubsList, AlarmID, \ - AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse + AlarmDescriptor, AlarmList, Kpi, KpiDescriptorList, SubsResponse, AlarmResponse, RawKpiTable +from common.tools.timestamp.Converters import timestamp_utcnow_to_float, timestamp_string_to_float from context.client.ContextClient import ContextClient from context.service.grpc_server.ContextService import ContextService @@ -43,6 +47,9 @@ from device.client.DeviceClient import DeviceClient from device.service.DeviceService import DeviceService from device.service.driver_api.DriverFactory import DriverFactory from device.service.driver_api.DriverInstanceCache import DriverInstanceCache +from 
monitoring.service.AlarmManager import AlarmManager +from monitoring.service.MetricsDBTools import MetricsDB +from monitoring.service.SubscriptionManager import SubscriptionManager os.environ['DEVICE_EMULATED_ONLY'] = 'TRUE' from device.service.drivers import DRIVERS # pylint: disable=wrong-import-position @@ -175,14 +182,23 @@ def subs_scheduler(): return _scheduler -def ingestion_data(monitoring_client): - _kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) - _include_kpi_request = include_kpi_request(_kpi_id) +def ingestion_data(kpi_id_int): + metrics_db = MetricsDB("localhost", "9009", "9000", "monitoring") + + for i in range(50): + kpiSampleType = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED).upper().replace('KPISAMPLETYPE_', '') + kpiId = kpi_id_int + deviceId = 'DEV'+ str(kpi_id_int) + endpointId = 'END' + str(kpi_id_int) + serviceId = 'SERV' + str(kpi_id_int) + sliceId = 'SLC' + str(kpi_id_int) + connectionId = 'CON' + str(kpi_id_int) + time_stamp = timestamp_utcnow_to_float() + kpi_value = 500*random() - for i in range(200): - _include_kpi_request = include_kpi_request(_kpi_id) - monitoring_client.IncludeKpi(_include_kpi_request) - time.sleep(0.01) + metrics_db.write_KPI(time_stamp, kpiId, kpiSampleType, deviceId, endpointId, serviceId, sliceId, connectionId, + kpi_value) + sleep(0.1) ########################### # Tests Implementation @@ -192,18 +208,17 @@ def ingestion_data(monitoring_client): def test_set_kpi(monitoring_client): # pylint: disable=redefined-outer-name # make call to server LOGGER.warning('test_create_kpi requesting') - response = monitoring_client.SetKpi(create_kpi_request()) - LOGGER.debug(str(response)) - response = monitoring_client.SetKpi(create_kpi_request_b()) - LOGGER.debug(str(response)) - assert isinstance(response, KpiId) + for i in range(3): + response = monitoring_client.SetKpi(create_kpi_request(str(i+1))) + LOGGER.debug(str(response)) + assert isinstance(response, KpiId) # Test case that makes use of client fixture to test server's DeleteKpi method def test_delete_kpi(monitoring_client): # pylint: disable=redefined-outer-name # make call to server LOGGER.warning('delete_kpi requesting') - response = monitoring_client.SetKpi(create_kpi_request_b()) + response = monitoring_client.SetKpi(create_kpi_request('4')) response = monitoring_client.DeleteKpi(response) LOGGER.debug(str(response)) assert isinstance(response, Empty) @@ -211,7 +226,7 @@ def test_delete_kpi(monitoring_client): # pylint: disable=redefined-outer-name # Test case that makes use of client fixture to test server's GetKpiDescriptor method def test_get_kpidescritor(monitoring_client): # pylint: disable=redefined-outer-name LOGGER.warning('test_getkpidescritor_kpi begin') - response = monitoring_client.SetKpi(create_kpi_request_c()) + response = monitoring_client.SetKpi(create_kpi_request('1')) response = monitoring_client.GetKpiDescriptor(response) LOGGER.debug(str(response)) assert isinstance(response, KpiDescriptor) @@ -227,7 +242,8 @@ def test_get_kpi_descriptor_list(monitoring_client): # pylint: disable=redefined def test_include_kpi(monitoring_client): # pylint: disable=redefined-outer-name # make call to server LOGGER.warning('test_include_kpi requesting') - kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) + kpi_id = monitoring_client.SetKpi(create_kpi_request('1')) + LOGGER.debug(str(kpi_id)) response = monitoring_client.IncludeKpi(include_kpi_request(kpi_id)) LOGGER.debug(str(response)) assert isinstance(response, Empty) @@ -261,44 +277,40 @@ def 
test_monitor_kpi( response = device_client.AddDevice(Device(**device_with_connect_rules)) assert response.device_uuid.uuid == DEVICE_DEV1_UUID - response = monitoring_client.SetKpi(create_kpi_request()) + response = monitoring_client.SetKpi(create_kpi_request('1')) _monitor_kpi_request = monitor_kpi_request(response.kpi_id.uuid, 120, 5) # pylint: disable=maybe-no-member response = monitoring_client.MonitorKpi(_monitor_kpi_request) LOGGER.debug(str(response)) assert isinstance(response, Empty) # Test case that makes use of client fixture to test server's QueryKpiData method -def test_query_kpi_data(monitoring_client): # pylint: disable=redefined-outer-name +def test_query_kpi_data(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name + + kpi_id_list = [] + for i in range(2): + kpi_id = monitoring_client.SetKpi(create_kpi_request(str(i+1))) + subs_scheduler.add_job(ingestion_data, args=[kpi_id.kpi_id.uuid]) + kpi_id_list.append(kpi_id) LOGGER.warning('test_query_kpi_data') - response = monitoring_client.QueryKpiData(kpi_query()) + sleep(5) + response = monitoring_client.QueryKpiData(kpi_query(kpi_id_list)) LOGGER.debug(str(response)) - assert isinstance(response, KpiList) - -def test_ingestion_data(monitoring_client): - _kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) - _include_kpi_request = include_kpi_request(_kpi_id) - - for i in range(100): - _include_kpi_request = include_kpi_request(_kpi_id) - monitoring_client.IncludeKpi(_include_kpi_request) - time.sleep(0.01) - -# def test_subscription_scheduler(monitoring_client,metrics_db,subs_scheduler): -# subs_scheduler.add_job(ingestion_data(monitoring_client),id="1") + assert isinstance(response, RawKpiTable) + if (subs_scheduler.state != STATE_STOPPED): + subs_scheduler.shutdown() # Test case that makes use of client fixture to test server's SetKpiSubscription method -def test_set_kpi_subscription(monitoring_client,metrics_db): # pylint: disable=redefined-outer-name +def test_set_kpi_subscription(monitoring_client,subs_scheduler): # pylint: disable=redefined-outer-name LOGGER.warning('test_set_kpi_subscription') - kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) - # thread = threading.Thread(target=test_ingestion_data, args=(monitoring_client,metrics_db)) - # thread.start() - monitoring_client.IncludeKpi(include_kpi_request(kpi_id)) + kpi_id = monitoring_client.SetKpi(create_kpi_request('1')) + subs_scheduler.add_job(ingestion_data, args=[kpi_id.kpi_id.uuid]) response = monitoring_client.SetKpiSubscription(subs_descriptor(kpi_id)) assert isinstance(response, _MultiThreadedRendezvous) - LOGGER.debug(response) for item in response: LOGGER.debug(item) assert isinstance(item, SubsResponse) + if (subs_scheduler.state != STATE_STOPPED): + subs_scheduler.shutdown() # Test case that makes use of client fixture to test server's GetSubsDescriptor method def test_get_subs_descriptor(monitoring_client): @@ -331,7 +343,8 @@ def test_delete_subscription(monitoring_client): # Test case that makes use of client fixture to test server's SetKpiAlarm method def test_set_kpi_alarm(monitoring_client): LOGGER.warning('test_set_kpi_alarm') - response = monitoring_client.SetKpiAlarm(alarm_descriptor()) + kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) + response = monitoring_client.SetKpiAlarm(alarm_descriptor(kpi_id)) LOGGER.debug(str(response)) assert isinstance(response, AlarmID) @@ -345,28 +358,35 @@ def test_get_alarms(monitoring_client): # Test case that makes use of client fixture to test server's 
GetAlarmDescriptor method def test_get_alarm_descriptor(monitoring_client): LOGGER.warning('test_get_alarm_descriptor') - alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor()) - response = monitoring_client.GetAlarmDescriptor(alarm_id) - LOGGER.debug(response) - assert isinstance(response, AlarmDescriptor) + _kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) + _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id)) + _response = monitoring_client.GetAlarmDescriptor(_alarm_id) + LOGGER.debug(_response) + assert isinstance(_response, AlarmDescriptor) # Test case that makes use of client fixture to test server's GetAlarmResponseStream method -def test_get_alarm_response_stream(monitoring_client): +def test_get_alarm_response_stream(monitoring_client,subs_scheduler): LOGGER.warning('test_get_alarm_descriptor') - alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor()) - response = monitoring_client.GetAlarmResponseStream(alarm_subscription(alarm_id)) - assert isinstance(response, _MultiThreadedRendezvous) - for item in response: - LOGGER.debug(response) + _kpi_id = monitoring_client.SetKpi(create_kpi_request('3')) + _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id)) + subs_scheduler.add_job(ingestion_data,args=[_kpi_id.kpi_id.uuid]) + _response = monitoring_client.GetAlarmResponseStream(alarm_subscription(_alarm_id)) + assert isinstance(_response, _MultiThreadedRendezvous) + for item in _response: + LOGGER.debug(item) assert isinstance(item,AlarmResponse) + if(subs_scheduler.state != STATE_STOPPED): + subs_scheduler.shutdown() + # Test case that makes use of client fixture to test server's DeleteAlarm method def test_delete_alarm(monitoring_client): LOGGER.warning('test_delete_alarm') - alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor()) - response = monitoring_client.DeleteAlarm(alarm_id) - LOGGER.debug(type(response)) - assert isinstance(response, Empty) + _kpi_id = monitoring_client.SetKpi(create_kpi_request_c()) + _alarm_id = monitoring_client.SetKpiAlarm(alarm_descriptor(_kpi_id)) + _response = monitoring_client.DeleteAlarm(_alarm_id) + LOGGER.debug(type(_response)) + assert isinstance(_response, Empty) # Test case that makes use of client fixture to test server's GetStreamKpi method def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-name @@ -384,64 +404,117 @@ def test_get_stream_kpi(monitoring_client): # pylint: disable=redefined-outer-na # response = monitoring_client.GetInstantKpi(kpi_id) # LOGGER.debug(response) # assert isinstance(response, Kpi) - # response = monitoring_client.GetInstantKpi(KpiId()) - # LOGGER.debug(type(response)) - # assert response.kpi_id.kpi_id.uuid == "NoID" -def test_managementdb_tools_insert_kpi(management_db): # pylint: disable=redefined-outer-name - LOGGER.warning('test_managementdb_tools_insert_kpi begin') - _create_kpi_request = create_kpi_request() - kpi_description = _create_kpi_request.kpi_description # pylint: disable=maybe-no-member - kpi_sample_type = _create_kpi_request.kpi_sample_type # pylint: disable=maybe-no-member - kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid # pylint: disable=maybe-no-member - kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member - kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid # pylint: disable=maybe-no-member - - response = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id) - assert 
isinstance(response, int) -def test_managementdb_tools_get_kpi(management_db): # pylint: disable=redefined-outer-name - LOGGER.warning('test_managementdb_tools_get_kpi begin') - _create_kpi_request = create_kpi_request() +def test_managementdb_tools_kpis(management_db): # pylint: disable=redefined-outer-name + LOGGER.warning('test_managementdb_tools_kpis begin') + _create_kpi_request = create_kpi_request('5') kpi_description = _create_kpi_request.kpi_description # pylint: disable=maybe-no-member kpi_sample_type = _create_kpi_request.kpi_sample_type # pylint: disable=maybe-no-member kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid # pylint: disable=maybe-no-member kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid # pylint: disable=maybe-no-member + kpi_slice_id = _create_kpi_request.slice_id.slice_uuid.uuid + kpi_connection_id = _create_kpi_request.connection_id.connection_uuid.uuid + + _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id,kpi_slice_id,kpi_connection_id) + assert isinstance(_kpi_id, int) - _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, kpi_service_id) response = management_db.get_KPI(_kpi_id) assert isinstance(response, tuple) -def test_managementdb_tools_get_kpis(management_db): # pylint: disable=redefined-outer-name - LOGGER.warning('test_managementdb_tools_get_kpis begin') + response = management_db.set_monitoring_flag(_kpi_id,True) + assert response is True + response = management_db.check_monitoring_flag(_kpi_id) + assert response is True + management_db.set_monitoring_flag(_kpi_id, False) + response = management_db.check_monitoring_flag(_kpi_id) + assert response is False + response = management_db.get_KPIS() assert isinstance(response, list) -def test_managementdb_tools_delete_kpi(management_db): # pylint: disable=redefined-outer-name - LOGGER.warning('test_managementdb_tools_get_kpi begin') - - _create_kpi_request = create_kpi_request() - kpi_description = _create_kpi_request.kpi_description # pylint: disable=maybe-no-member - kpi_sample_type = _create_kpi_request.kpi_sample_type # pylint: disable=maybe-no-member - kpi_device_id = _create_kpi_request.device_id.device_uuid.uuid # pylint: disable=maybe-no-member - kpi_endpoint_id = _create_kpi_request.endpoint_id.endpoint_uuid.uuid # pylint: disable=maybe-no-member - kpi_service_id = _create_kpi_request.service_id.service_uuid.uuid # pylint: disable=maybe-no-member - - _kpi_id = management_db.insert_KPI(kpi_description, kpi_sample_type, kpi_device_id, kpi_endpoint_id, - kpi_service_id) - response = management_db.delete_KPI(_kpi_id) - assert response -def test_metrics_db_tools_write_kpi(metrics_db): # pylint: disable=redefined-outer-name - LOGGER.warning('test_metric_sdb_tools_write_kpi begin') - -def test_metrics_db_tools_read_kpi_points(metrics_db): # pylint: disable=redefined-outer-name - LOGGER.warning('test_metrics_db_tools_read_kpi_points begin') +def test_managementdb_tools_insert_alarm(management_db): + LOGGER.warning('test_managementdb_tools_insert_alarm begin') + _alarm_description = "Alarm Description" + _alarm_name = "Alarm Name" + _kpi_id = "3" + _kpi_min_value = 0.0 + _kpi_max_value = 250.0 + _in_range = True + _include_min_value = False + _include_max_value = True + _alarm_id = management_db.insert_alarm(_alarm_description, _alarm_name, _kpi_id, 
_kpi_min_value, + _kpi_max_value, + _in_range, _include_min_value, _include_max_value) + LOGGER.debug(_alarm_id) + assert isinstance(_alarm_id,int) +# +# def test_metrics_db_tools(metrics_db): # pylint: disable=redefined-outer-name +# LOGGER.warning('test_metric_sdb_tools_write_kpi begin') +# _kpiId = "6" +# +# for i in range(50): +# _kpiSampleType = KpiSampleType.Name(KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED).upper().replace('KPISAMPLETYPE_', '') +# _deviceId = 'DEV4' +# _endpointId = 'END4' +# _serviceId = 'SERV4' +# _sliceId = 'SLC4' +# _connectionId = 'CON4' +# _time_stamp = timestamp_utcnow_to_float() +# _kpi_value = 500*random() +# +# metrics_db.write_KPI(_time_stamp, _kpiId, _kpiSampleType, _deviceId, _endpointId, _serviceId, _sliceId, _connectionId, +# _kpi_value) +# sleep(0.05) +# +# _query = f"SELECT * FROM monitoring WHERE kpi_id ='{_kpiId}'" +# _data = metrics_db.run_query(_query) +# assert len(_data) >= 50 +# +# def test_subscription_manager_create_subscription(management_db,metrics_db,subs_scheduler): +# LOGGER.warning('test_subscription_manager_create_subscription begin') +# subs_queue = Queue() +# +# subs_manager = SubscriptionManager(metrics_db) +# +# subs_scheduler.add_job(ingestion_data) +# +# kpi_id = "3" +# sampling_duration_s = 20 +# sampling_interval_s = 3 +# real_start_time = timestamp_utcnow_to_float() +# start_timestamp = real_start_time +# end_timestamp = start_timestamp + sampling_duration_s +# +# subs_id = management_db.insert_subscription(kpi_id, "localhost", sampling_duration_s, +# sampling_interval_s,start_timestamp,end_timestamp) +# subs_manager.create_subscription(subs_queue,subs_id,kpi_id,sampling_interval_s, +# sampling_duration_s,start_timestamp,end_timestamp) +# +# # This is here to simulate application activity (which keeps the main thread alive). +# total_points = 0 +# while True: +# while not subs_queue.empty(): +# list = subs_queue.get_nowait() +# kpi_list = KpiList() +# for item in list: +# kpi = Kpi() +# kpi.kpi_id.kpi_id.uuid = item[0] +# kpi.timestamp.timestamp = timestamp_string_to_float(item[1]) +# kpi.kpi_value.floatVal = item[2] +# kpi_list.kpi.append(kpi) +# total_points += 1 +# LOGGER.debug(kpi_list) +# if timestamp_utcnow_to_float() > end_timestamp: +# break +# +# assert total_points != 0 def test_events_tools( context_client : ContextClient, # pylint: disable=redefined-outer-name diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py index 1d55646abffcdb4a882167406ba046aca7bfa651..205306d0ec2d156a2050d1f95c5c1e990796e018 100644 --- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py +++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py @@ -13,11 +13,16 @@ # limitations under the License. 
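# Illustrative sketch (not part of the patch): a simplified view of the range check
# behind the alarm fields stored by insert_alarm() above (kpi_min_value, kpi_max_value,
# in_range, include_min_value, include_max_value), assuming both bounds are set. A
# sample triggers when it falls inside (in_range=True) or outside (in_range=False) the
# interval; the include_* flags decide whether the bounds themselves count. Defaults
# mirror alarm_descriptor() in Messages.py.
def alarm_triggered(value, kpi_min, kpi_max, in_range=True, include_min=False, include_max=True):
    above_min = (value >= kpi_min) if include_min else (value > kpi_min)
    below_max = (value <= kpi_max) if include_max else (value < kpi_max)
    inside = above_min and below_max
    return inside if in_range else not inside

assert alarm_triggered(250.0, 0.0, 250.0) is True   # max bound included by default
assert alarm_triggered(0.0, 0.0, 250.0) is False    # min bound excluded by default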
import grpc, logging -from common.proto.context_pb2 import Empty +from common.Constants import DEFAULT_CONTEXT_UUID, INTERDOMAIN_TOPOLOGY_UUID +from common.proto.context_pb2 import ContextId, Empty from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.context_queries.Device import get_devices_in_topology +from common.tools.context_queries.Link import get_links_in_topology +from common.tools.context_queries.InterDomain import is_inter_domain from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from pathcomp.frontend.service.algorithms.Factory import get_algorithm @@ -27,6 +32,8 @@ SERVICE_NAME = 'PathComp' METHOD_NAMES = ['Compute'] METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_UUID)) + class PathCompServiceServicerImpl(PathCompServiceServicer): def __init__(self) -> None: LOGGER.debug('Creating Servicer...') @@ -38,11 +45,18 @@ class PathCompServiceServicerImpl(PathCompServiceServicer): context_client = ContextClient() - # TODO: add filtering of devices and links - # TODO: add contexts, topologies, and membership of devices/links in topologies + if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids): + devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_UUID) + else: + # TODO: improve filtering of devices and links + # TODO: add contexts, topologies, and membership of devices/links in topologies + devices = context_client.ListDevices(Empty()) + links = context_client.ListLinks(Empty()) + algorithm = get_algorithm(request) - algorithm.add_devices(context_client.ListDevices(Empty())) - algorithm.add_links(context_client.ListLinks(Empty())) + algorithm.add_devices(devices) + algorithm.add_links(links) algorithm.add_service_requests(request) #LOGGER.debug('device_list = {:s}' .format(str(algorithm.device_list ))) diff --git a/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py index d5f937fd207807ba650669ea9fb2395b2e21b164..e0a2441823627843f1e14bde905da4f82ed7a593 100644 --- a/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/ShortestPathAlgorithm.py @@ -12,15 +12,42 @@ # See the License for the specific language governing permissions and # limitations under the License. 
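# Illustrative sketch (not part of the patch): the graph-selection rule added to
# Compute() above, extracted as a pure function. For a request carrying exactly one
# service whose endpoints span domains, only the inter-domain topology is used;
# otherwise the full device/link listings are kept. The callables below are
# placeholders for the context queries the patch imports.
def select_graph(request_services, is_inter_domain, list_all, list_interdomain):
    if len(request_services) == 1 and is_inter_domain(request_services[0]):
        return list_interdomain()   # devices/links of INTERDOMAIN_TOPOLOGY_UUID only
    return list_all()               # still a TODO in the patch: improve this filtering

# toy usage with stand-in callables
graph = select_graph(
    ['svc-1'],
    is_inter_domain=lambda svc: True,
    list_all=lambda: ('all-devices', 'all-links'),
    list_interdomain=lambda: ('idc-devices', 'idc-links'))
assert graph == ('idc-devices', 'idc-links')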
-from common.proto.pathcomp_pb2 import Algorithm_ShortestPath +from typing import Dict, Optional +from common.proto.pathcomp_pb2 import Algorithm_ShortestPath, PathCompRequest from ._Algorithm import _Algorithm class ShortestPathAlgorithm(_Algorithm): def __init__(self, algorithm : Algorithm_ShortestPath, class_name=__name__) -> None: super().__init__('SP', False, class_name=class_name) - def add_service_requests(self, requested_services) -> None: - super().add_service_requests(requested_services) + def add_service_requests(self, request : PathCompRequest) -> None: + super().add_service_requests(request) for service_request in self.service_list: service_request['algId' ] = self.algorithm_id service_request['syncPaths'] = self.sync_paths + + def _single_device_request(self) -> Optional[Dict]: + if len(self.service_list) != 1: return None + service = self.service_list[0] + endpoint_ids = service['service_endpoints_ids'] + if len(endpoint_ids) != 2: return None + if endpoint_ids[0]['device_id'] != endpoint_ids[-1]['device_id']: return None + return {'response-list': [{ + 'serviceId': service['serviceId'], + 'service_endpoints_ids': [endpoint_ids[0], endpoint_ids[-1]], + 'path': [{ + # not used by now + #'path-capacity': {'total-size': {'value': 200, 'unit': 0}}, + #'path-latency': {'fixed-latency-characteristic': '2.000000'}, + #'path-cost': {'cost-name': '', 'cost-value': '1.000000', 'cost-algorithm': '0.000000'}, + 'devices': [endpoint_ids[0], endpoint_ids[-1]] + }] + }]} + + def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None: + # if request is composed of a single service with single device (not supported by backend), + # produce synthetic reply directly + self.json_reply = self._single_device_request() + if self.json_reply is None: + # otherwise, follow normal logic through the backend + return super().execute(dump_request_filename, dump_reply_filename) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index 43811c0687fa64206cf4491750411f0aa2994ac6..3833642457bc5f8c2ba7b7d09f384a87dfabe41d 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -13,7 +13,7 @@ # limitations under the License. 
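# Illustrative sketch (not part of the patch): the predicate behind
# _single_device_request() above. A synthetic reply is only produced for exactly one
# service with exactly two endpoints on the same device; everything else still goes
# through the backend. The dicts below mimic the entries built by
# add_service_requests().
def is_single_device(service_list):
    if len(service_list) != 1:
        return False
    endpoint_ids = service_list[0]['service_endpoints_ids']
    return len(endpoint_ids) == 2 and endpoint_ids[0]['device_id'] == endpoint_ids[-1]['device_id']

assert is_single_device([{'service_endpoints_ids': [
    {'device_id': 'R1', 'endpoint_uuid': 'eth0'},
    {'device_id': 'R1', 'endpoint_uuid': 'eth1'},
]}]) is True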
 import json, logging, requests
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List, Optional, Tuple, Union
 from common.proto.context_pb2 import (
     ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service,
     ServiceStatusEnum, ServiceTypeEnum)
@@ -23,7 +23,8 @@ from pathcomp.frontend.Config import BACKEND_URL
 from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum
 from .tools.EroPathToHops import eropath_to_hops
 from .tools.ComposeRequest import compose_device, compose_link, compose_service
-from .tools.ComputeSubServices import convert_explicit_path_hops_to_connections
+from .tools.ComputeSubServices import (
+    convert_explicit_path_hops_to_connections, convert_explicit_path_hops_to_plain_connection)

 class _Algorithm:
     def __init__(self, algorithm_id : str, sync_paths : bool, class_name=__name__) -> None:
@@ -46,8 +47,9 @@ class _Algorithm:
         self.service_list : List[Dict] = list()
         self.service_dict : Dict[Tuple[str, str], Tuple[Dict, Service]] = dict()

-    def add_devices(self, grpc_devices : DeviceList) -> None:
-        for grpc_device in grpc_devices.devices:
+    def add_devices(self, grpc_devices : Union[List[Device], DeviceList]) -> None:
+        if isinstance(grpc_devices, DeviceList): grpc_devices = grpc_devices.devices
+        for grpc_device in grpc_devices:
             json_device = compose_device(grpc_device)
             self.device_list.append(json_device)
@@ -62,8 +64,9 @@ class _Algorithm:
             self.endpoint_dict[device_uuid] = device_endpoint_dict

-    def add_links(self, grpc_links : LinkList) -> None:
-        for grpc_link in grpc_links.links:
+    def add_links(self, grpc_links : Union[List[Link], LinkList]) -> None:
+        if isinstance(grpc_links, LinkList): grpc_links = grpc_links.links
+        for grpc_link in grpc_links:
             json_link = compose_link(grpc_link)
             self.link_list.append(json_link)
@@ -206,7 +209,12 @@ class _Algorithm:
             for service_path_ero in response['path']:
                 path_hops = eropath_to_hops(service_path_ero['devices'], self.endpoint_to_link_dict)
-                connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                try:
+                    connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid)
+                except: # pylint: disable=bare-except
+                    # if not able to extract sub-services and sub-connections,
+                    # assume a single service and a single connection
+                    connections = convert_explicit_path_hops_to_plain_connection(path_hops, service_uuid)

                 for connection in connections:
                     connection_uuid,device_layer,path_hops,_ = connection
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
index c1977cedb9b341fbb767a5fb8c829cd5f633884c..17a7e74ef573e4926d53045ab8888c71a3dd73d7 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py
@@ -34,9 +34,11 @@ def compose_topology_id(topology_id : TopologyId) -> Dict:
     return {'contextId': context_uuid, 'topology_uuid': topology_uuid}

 def compose_service_id(service_id : ServiceId) -> Dict:
-    context_uuid = service_id.context_id.context_uuid.uuid
-
-    if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID
+    # for simplicity, force context_uuid to always be DEFAULT_CONTEXT_UUID;
+    # interdomain contexts are managed in a particular way
+    #context_uuid = service_id.context_id.context_uuid.uuid
+    #if len(context_uuid) == 0: context_uuid = DEFAULT_CONTEXT_UUID
+
context_uuid = DEFAULT_CONTEXT_UUID service_uuid = service_id.service_uuid.uuid return {'contextId': context_uuid, 'service_uuid': service_uuid} diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index f2c66cb24ca3c15c71f22dbe4eeca634e18d985a..7c7b62e2d039d2e6bad979b3601e09ca1c54ea51 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -94,3 +94,19 @@ def convert_explicit_path_hops_to_connections( connections.append(connection_stack.get()) assert connection_stack.empty() return connections + +def convert_explicit_path_hops_to_plain_connection( + path_hops : List[Dict], main_connection_uuid : str +) -> List[Tuple[str, DeviceLayerEnum, List[str], List[str]]]: + + connection : Tuple[str, DeviceLayerEnum, List[str], List[str]] = \ + (main_connection_uuid, DeviceLayerEnum.PACKET_DEVICE, [], []) + + last_device_uuid = None + for path_hop in path_hops: + device_uuid = path_hop['device'] + if last_device_uuid == device_uuid: continue + connection[2].append(path_hop) + last_device_uuid = device_uuid + + return [connection] diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py index 7d61e1ef8f78950ec6f9bd0878de136d4a01b554..56e11b1b4a0293bcdbed2f1d3cd7c08814d7b161 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py @@ -80,6 +80,7 @@ class DeviceLayerEnum(IntEnum): DEVICE_TYPE_TO_LAYER = { DeviceTypeEnum.EMULATED_DATACENTER.value : DeviceLayerEnum.APPLICATION_DEVICE, DeviceTypeEnum.DATACENTER.value : DeviceLayerEnum.APPLICATION_DEVICE, + DeviceTypeEnum.NETWORK.value : DeviceLayerEnum.APPLICATION_DEVICE, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value : DeviceLayerEnum.PACKET_DEVICE, DeviceTypeEnum.PACKET_ROUTER.value : DeviceLayerEnum.PACKET_DEVICE, diff --git a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py index 021940937c23a7cb461a603aa32a15f16626eb1d..a885ddb29c3fa70d6bccea18f43fef5b038aae68 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py +++ b/src/pathcomp/frontend/service/algorithms/tools/EroPathToHops.py @@ -42,35 +42,43 @@ # ] # +import logging from typing import Dict, List +LOGGER = logging.getLogger(__name__) + def eropath_to_hops(ero_path : List[Dict], endpoint_to_link_dict : Dict) -> List[Dict]: - path_hops = [] - for endpoint in ero_path: - device_uuid = endpoint['device_id'] - endpoint_uuid = endpoint['endpoint_uuid'] + try: + path_hops = [] + for endpoint in ero_path: + device_uuid = endpoint['device_id'] + endpoint_uuid = endpoint['endpoint_uuid'] - if len(path_hops) == 0: - path_hops.append({'device': device_uuid, 'ingress_ep': endpoint_uuid}) - continue + if len(path_hops) == 0: + path_hops.append({'device': device_uuid, 'ingress_ep': endpoint_uuid}) + continue - last_hop = path_hops[-1] - if (last_hop['device'] == device_uuid): - if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue - last_hop['egress_ep'] = endpoint_uuid - continue + last_hop = path_hops[-1] + if (last_hop['device'] == device_uuid): + if ('ingress_ep' not in last_hop) or ('egress_ep' in last_hop): continue + last_hop['egress_ep'] = endpoint_uuid + continue - endpoint_key = 
(last_hop['device'], last_hop['egress_ep']) - link_tuple = endpoint_to_link_dict.get(endpoint_key) - ingress = next(iter([ - ep_id for ep_id in link_tuple[0]['link_endpoint_ids'] - if (ep_id['endpoint_id']['device_id'] == device_uuid) and\ - (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid) - ]), None) - if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path') - path_hops.append({ - 'device': ingress['endpoint_id']['device_id'], - 'ingress_ep': ingress['endpoint_id']['endpoint_uuid'], - 'egress_ep': endpoint_uuid, - }) - return path_hops + endpoint_key = (last_hop['device'], last_hop['egress_ep']) + link_tuple = endpoint_to_link_dict.get(endpoint_key) + ingress = next(iter([ + ep_id for ep_id in link_tuple[0]['link_endpoint_ids'] + if (ep_id['endpoint_id']['device_id'] == device_uuid) and\ + (ep_id['endpoint_id']['endpoint_uuid'] != endpoint_uuid) + ]), None) + if ingress['endpoint_id']['device_id'] != device_uuid: raise Exception('Malformed path') + path_hops.append({ + 'device': ingress['endpoint_id']['device_id'], + 'ingress_ep': ingress['endpoint_id']['endpoint_uuid'], + 'egress_ep': endpoint_uuid, + }) + return path_hops + except: + LOGGER.exception('Unhandled exception: ero_path={:s} endpoint_to_link_dict={:s}'.format( + str(ero_path), str(endpoint_to_link_dict))) + raise diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml new file mode 100644 index 0000000000000000000000000000000000000000..1a2b4e26c2147273256587e5580265464be69758 --- /dev/null +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -0,0 +1,89 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5 + app.quarkus.io/build-timestamp: 2022-11-18 - 12:56:37 +0000 + labels: + app.kubernetes.io/name: policyservice + app: policyservice + name: policyservice +spec: + ports: + - name: http + port: 8080 + targetPort: 8080 + - name: grpc + port: 6060 + targetPort: 6060 + selector: + app.kubernetes.io/name: policyservice + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5 + app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000 + labels: + app: policyservice + app.kubernetes.io/name: policyservice + name: policyservice +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: policyservice + template: + metadata: + annotations: + app.quarkus.io/commit-id: e369fc6b4de63303f91e1fd3de0b6a591a86c0f5 + app.quarkus.io/build-timestamp: 2022-11-22 - 14:10:01 +0000 + labels: + app: policyservice + app.kubernetes.io/name: policyservice + spec: + containers: + - env: + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_SERVICE_HOST + value: monitoringservice + - name: CONTEXT_SERVICE_HOST + value: contextservice + - name: SERVICE_SERVICE_HOST + value: serviceservice + image: registry.gitlab.com/teraflow-h2020/controller/policy:0.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /q/health/live + port: 8080 + scheme: HTTP + initialDelaySeconds: 2 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + name: policyservice + ports: + - containerPort: 8080 + name: http + protocol: TCP + - containerPort: 6060 + name: grpc + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /q/health/ready + port: 8080 + scheme: HTTP + 
initialDelaySeconds: 2 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 0f21812089e2af8271884ef7539f979ff0426a5a..afc15795c7b6ae8bf59b719db2f3d920614aa51c 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -23,7 +23,7 @@ SERVICE_TYPE_VALUES = { ServiceTypeEnum.SERVICETYPE_UNKNOWN, ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM, - ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, + ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE } DEVICE_DRIVER_VALUES = { diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 34689ca1136c68611a098115b5acf5b74a788372..4310a17d71d43d621f96876b6f68694e680bd6e1 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -17,6 +17,7 @@ from ..service_handler_api.FilterFields import FilterFieldEnum from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler +from .p4.p4_service_handler import P4ServiceHandler from .tapi_tapi.TapiServiceHandler import TapiServiceHandler from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler @@ -51,4 +52,10 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, } ]), -] \ No newline at end of file + (P4ServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_L2NM, + FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, + } + ]), +] diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py index 18a5aea29eb7c025372d00828feb127336e90102..f12c9ab984205b9057dd1507114e5bc17d8deaa6 100644 --- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py +++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py @@ -60,12 +60,13 @@ def setup_config_rules( {'name': network_instance_name, 'type': 'L2VSI'}), json_config_rule_set( - '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index), + '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}), json_config_rule_set( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}), + {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, + 'subinterface': sub_interface_index}), json_config_rule_set( '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), @@ -107,10 +108,11 @@ def teardown_config_rules( json_config_rule_delete( '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), - {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}), + {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, + 'subinterface': sub_interface_index}), json_config_rule_delete( - 
'/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index), + '/interface[{:s}]/subinterface[{:d}]'.format(if_cirid_name, sub_interface_index), {'name': if_cirid_name, 'index': sub_interface_index}), json_config_rule_delete( diff --git a/src/service/service/service_handlers/p4/__init__.py b/src/service/service/service_handlers/p4/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9953c820575d42fa88351cc8de022d880ba96e6a --- /dev/null +++ b/src/service/service/service_handlers/p4/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4/p4_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..48b9715b7bb89bc53d6888299836e9b2bf89f1d4 --- /dev/null +++ b/src/service/service/service_handlers/p4/p4_service_handler.py @@ -0,0 +1,305 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +P4 service handler for the TeraFlowSDN controller. 
+""" + +import anytree, json, logging +from typing import Any, Dict, List, Optional, Tuple, Union +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service +from common.tools.object_factory.ConfigRule import json_config_rule, json_config_rule_delete, json_config_rule_set +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type, chk_length +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +def create_rule_set(endpoint_a, endpoint_b): + return json_config_rule_set( + 'table', + { + 'table-name': 'IngressPipeImpl.l2_exact_table', + 'match-fields': [ + { + 'match-field': 'standard_metadata.ingress_port', + 'match-value': endpoint_a + } + ], + 'action-name': 'IngressPipeImpl.set_egress_port', + 'action-params': [ + { + 'action-param': 'port', + 'action-value': endpoint_b + } + ] + } +) + +def create_rule_del(endpoint_a, endpoint_b): + return json_config_rule_delete( + 'table', + { + 'table-name': 'IngressPipeImpl.l2_exact_table', + 'match-fields': [ + { + 'match-field': 'standard_metadata.ingress_port', + 'match-value': endpoint_a + } + ], + 'action-name': 'IngressPipeImpl.set_egress_port', + 'action-params': [ + { + 'action-param': 'port', + 'action-value': endpoint_b + } + ] + } +) + +class P4ServiceHandler(_ServiceHandler): + def __init__(self, + service: Service, + task_executor : TaskExecutor, + **settings) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. + """ + self.__service = service + self.__task_executor = task_executor # pylint: disable=unused-private-member + + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints form a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + service_uuid = self.__service.service_id.service_uuid.uuid + + history = {} + + results = [] + index = {} + i = 0 + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now + if device_uuid in history: + try: + matched_endpoint_uuid = history.pop(device_uuid) + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + + del device.device_config.config_rules[:] + + # One way + rule = create_rule_set(matched_endpoint_uuid, endpoint_uuid) + device.device_config.config_rules.append(ConfigRule(**rule)) + # The other way + rule = create_rule_set(endpoint_uuid, matched_endpoint_uuid) + device.device_config.config_rules.append(ConfigRule(**rule)) + + self.__task_executor.configure_device(device) + + results.append(True) + results[index[device_uuid]] = True + except Exception as e: + LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) + results.append(e) + else: + history[device_uuid] = endpoint_uuid + index[device_uuid] = i + results.append(False) + i = i+1 + + return results + + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints form a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + service_uuid = self.__service.service_id.service_uuid.uuid + + history = {} + + results = [] + index = {} + i = 0 + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now + if device_uuid in history: + try: + matched_endpoint_uuid = history.pop(device_uuid) + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + + del device.device_config.config_rules[:] + + # One way + rule = create_rule_del(matched_endpoint_uuid, endpoint_uuid) + device.device_config.config_rules.append(ConfigRule(**rule)) + # The other way + rule = create_rule_del(endpoint_uuid, matched_endpoint_uuid) + device.device_config.config_rules.append(ConfigRule(**rule)) + + self.__task_executor.configure_device(device) + + results.append(True) + results[index[device_uuid]] = True + except Exception as e: + LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) + results.append(e) + else: + history[device_uuid] = endpoint_uuid + index[device_uuid] = i + results.append(False) + i = i+1 + + return results + + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. 
+ Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key changes requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + def DeleteConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value containing + possible additionally required values to locate the value + to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.'
+ LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] \ No newline at end of file diff --git a/src/slice/Dockerfile b/src/slice/Dockerfile index 96a751d156edcaef38794ecfe5b409cbeb081e82..7dadc477f70667c827d4a9eb0ddd013c85b97344 100644 --- a/src/slice/Dockerfile +++ b/src/slice/Dockerfile @@ -64,6 +64,7 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow COPY src/context/. context/ COPY src/interdomain/. interdomain/ +COPY src/pathcomp/. pathcomp/ COPY src/service/. service/ COPY src/slice/. slice/ diff --git a/src/common/database/api/context/slice/SliceStatus.py b/src/slice/old_code/SliceStatus.py similarity index 100% rename from src/common/database/api/context/slice/SliceStatus.py rename to src/slice/old_code/SliceStatus.py diff --git a/src/slice/old_code/Tools.py b/src/slice/old_code/Tools.py index 4ea7900489f27588399e2eb94b6a5576d8b08fd0..08323f935195d8a0221b3f8889c0e6beeef94cb2 100644 --- a/src/slice/old_code/Tools.py +++ b/src/slice/old_code/Tools.py @@ -18,7 +18,7 @@ from common.Checkers import chk_options, chk_string from common.database.api.Database import Database from common.database.api.context.Constants import DEFAULT_CONTEXT_ID, DEFAULT_TOPOLOGY_ID from common.database.api.context.service.Service import Service -from common.database.api.context.slice.SliceStatus import SliceStatus, slicestatus_enum_values, to_slicestatus_enum +from slice.old_code.SliceStatus import SliceStatus, slicestatus_enum_values, to_slicestatus_enum from common.database.api.context.topology.device.Endpoint import Endpoint from common.exceptions.ServiceException import ServiceException from common.proto.slice_pb2 import TransportSlice diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 53875f0e6ae7c8e3e7d5ac9dad7501a2136844c4..ada7218588391766147a02f9713b540016522aa7 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -17,11 +17,12 @@ from common.proto.context_pb2 import ( Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum) from common.proto.slice_pb2_grpc import SliceServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.context_queries.InterDomain import is_multi_domain from common.tools.grpc.ConfigRules import copy_config_rules from common.tools.grpc.Constraints import copy_constraints from common.tools.grpc.EndPointIds import copy_endpoint_ids from common.tools.grpc.ServiceIds import update_service_ids -from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from interdomain.client.InterdomainClient import InterdomainClient from service.client.ServiceClient import ServiceClient @@ -42,103 +43,118 @@ class SliceServiceServicerImpl(SliceServiceServicer): try: _slice = context_client.GetSlice(request.slice_id) #json_current_slice = grpc_message_to_json(_slice) - except: + except: # pylint: disable=bare-except #json_current_slice = {} slice_request = Slice() - slice_request.slice_id.CopyFrom(request.slice_id) - slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED + slice_request.slice_id.CopyFrom(request.slice_id) # pylint: disable=no-member + slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: 
disable=no-member context_client.SetSlice(slice_request) _slice = context_client.GetSlice(request.slice_id) + slice_request = Slice() slice_request.CopyFrom(_slice) + if len(request.slice_endpoint_ids) < 2: + # unable to identify the kind of slice; just update endpoints, constraints and config rules + # update the slice in database, and return + # pylint: disable=no-member + copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) + copy_constraints(request.slice_constraints, slice_request.slice_constraints) + copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) + return context_client.SetSlice(slice_request) + #LOGGER.info('json_current_slice = {:s}'.format(str(json_current_slice))) #json_updated_slice = grpc_message_to_json(request) #LOGGER.info('json_updated_slice = {:s}'.format(str(json_updated_slice))) #changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice) #LOGGER.info('changes = {:s}'.format(str(changes))) - domains = set() - for slice_endpoint_id in request.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - device_parts = device_uuid.split('@') - domain_uuid = '' if len(device_parts) == 1 else device_parts[1] - domains.add(domain_uuid) - LOGGER.info('domains = {:s}'.format(str(domains))) - is_multi_domain = len(domains) > 1 - LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain))) - - if is_multi_domain: + if is_multi_domain(context_client, request.slice_endpoint_ids): interdomain_client = InterdomainClient() slice_id = interdomain_client.RequestSlice(request) - else: - service_id = ServiceId() - context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid - - service_client = ServiceClient() - try: - _service = context_client.GetService(service_id) - except: - service_request = Service() - service_request.service_id.CopyFrom(service_id) - service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN - service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED - service_reply = service_client.CreateService(service_request) - if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service creation failed. 
Wrong Service Id was returned') - _service = context_client.GetService(service_id) + slice_ = context_client.GetSlice(slice_id) + slice_active = Slice() + slice_active.CopyFrom(slice_) + slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member + context_client.SetSlice(slice_active) + return slice_id + + # Local domain slice + service_id = ServiceId() + # pylint: disable=no-member + context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid + + service_client = ServiceClient() + try: + _service = context_client.GetService(service_id) + except: # pylint: disable=bare-except + # pylint: disable=no-member service_request = Service() - service_request.CopyFrom(_service) - - copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids) - copy_constraints(request.slice_constraints, service_request.service_constraints) - copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules) - + service_request.service_id.CopyFrom(service_id) service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN - for config_rule in request.slice_config.config_rules: - LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule))) - config_rule_kind = config_rule.WhichOneof('config_rule') - LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind))) - if config_rule_kind != 'custom': continue - custom = config_rule.custom - resource_key = custom.resource_key - LOGGER.info('resource_key: {:s}'.format(str(resource_key))) - - # TODO: parse resource key with regular expression, e.g.: - # m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s) - if not resource_key.startswith('/device'): continue - if not resource_key.endswith('/settings'): continue - - resource_value = json.loads(custom.resource_value) - LOGGER.info('resource_value: {:s}'.format(str(resource_value))) - - if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: - if (resource_value.get('address_ip') is not None and \ - resource_value.get('address_prefix') is not None): - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM - LOGGER.info('is L3') - else: - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM - LOGGER.info('is L2') - break - - service_reply = service_client.UpdateService(service_request) + service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED + service_reply = service_client.CreateService(service_request) if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service update failed. Wrong Service Id was returned') - - copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) - copy_constraints(request.slice_constraints, slice_request.slice_constraints) - copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) - - update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid) - context_client.SetSlice(slice_request) - slice_id = slice_request.slice_id + # pylint: disable=raise-missing-from + raise Exception('Service creation failed. 
Wrong Service Id was returned') + _service = context_client.GetService(service_id) + service_request = Service() + service_request.CopyFrom(_service) + + # pylint: disable=no-member + copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids) + copy_constraints(request.slice_constraints, service_request.service_constraints) + copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules) + + service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN + for config_rule in request.slice_config.config_rules: + LOGGER.info('config_rule: {:s}'.format(grpc_message_to_json_string(config_rule))) + config_rule_kind = config_rule.WhichOneof('config_rule') + LOGGER.info('config_rule_kind: {:s}'.format(str(config_rule_kind))) + if config_rule_kind != 'custom': continue + custom = config_rule.custom + resource_key = custom.resource_key + LOGGER.info('resource_key: {:s}'.format(str(resource_key))) + + # TODO: parse resource key with regular expression, e.g.: + # m = re.match('\/device\[[^\]]\]\/endpoint\[[^\]]\]\/settings', s) + if not resource_key.startswith('/device'): continue + if not resource_key.endswith('/settings'): continue + + resource_value = json.loads(custom.resource_value) + LOGGER.info('resource_value: {:s}'.format(str(resource_value))) + + if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: + if (resource_value.get('address_ip') is not None and \ + resource_value.get('address_prefix') is not None): + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM + LOGGER.info('is L3') + else: + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + LOGGER.info('is L2') + break + + if service_request.service_type == ServiceTypeEnum.SERVICETYPE_UNKNOWN: + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + LOGGER.info('assume L2') + + service_reply = service_client.UpdateService(service_request) + if service_reply != service_request.service_id: # pylint: disable=no-member + raise Exception('Service update failed. 
Wrong Service Id was returned') + + copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) + copy_constraints(request.slice_constraints, slice_request.slice_constraints) + copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) + + update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid) + context_client.SetSlice(slice_request) + slice_id = slice_request.slice_id slice_ = context_client.GetSlice(slice_id) slice_active = Slice() slice_active.CopyFrom(slice_) - slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE + slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member context_client.SetSlice(slice_active) return slice_id @@ -175,21 +191,11 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client = ContextClient() try: _slice = context_client.GetSlice(request) - except: + except: # pylint: disable=bare-except return Empty() - domains = set() - for slice_endpoint_id in _slice.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - device_parts = device_uuid.split('@') - domain_uuid = '' if len(device_parts) == 1 else device_parts[1] - domains.add(domain_uuid) - LOGGER.info('domains = {:s}'.format(str(domains))) - is_multi_domain = len(domains) > 1 - LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain))) - - if is_multi_domain: - interdomain_client = InterdomainClient() + if is_multi_domain(context_client, _slice.slice_endpoint_ids): + #interdomain_client = InterdomainClient() #slice_id = interdomain_client.DeleteSlice(request) raise NotImplementedError('Delete inter-domain slice') else: diff --git a/src/slice/service/__main__.py b/src/slice/service/__main__.py index a59c54b4b1b56865871d331409c1a7f60629aec6..b2f4536503ac176628c42cf0211315089697c50e 100644 --- a/src/slice/service/__main__.py +++ b/src/slice/service/__main__.py @@ -15,7 +15,9 @@ import logging, signal, sys, threading from prometheus_client import start_http_server from common.Constants import ServiceNameEnum -from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, + wait_for_environment_variables) from .SliceService import SliceService terminate = threading.Event() diff --git a/src/tests/Fixtures.py b/src/tests/Fixtures.py index aeead8448651b386f4c69d12c139b6043fe5ef55..25b73e1de143b8c60d9a726ddf2bd3cea97d17a5 100644 --- a/src/tests/Fixtures.py +++ b/src/tests/Fixtures.py @@ -13,8 +13,6 @@ # limitations under the License. import pytest -from common.Settings import get_setting -from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from monitoring.client.MonitoringClient import MonitoringClient diff --git a/src/tests/benchmark/automation/.gitignore b/src/tests/benchmark/automation/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0a3f4400d5c88b1af32c7667d69d2fdc12d5424e --- /dev/null +++ b/src/tests/benchmark/automation/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. 
+descriptors_real.json diff --git a/src/tests/benchmark/automation/README.md b/src/tests/benchmark/automation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8b5b2a01efc357b5d8eca6a6890b051b4ffac260 --- /dev/null +++ b/src/tests/benchmark/automation/README.md @@ -0,0 +1,17 @@ + +# Grafana k6 load testing tool + +# K6 Installation Instructions on Ubuntu + +sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 +echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list +sudo apt-get update +sudo apt-get install k6 + +Or install k6 via snap: + +sudo apt install snapd +sudo snap install k6 + +# Running K6 +k6 run script.js \ No newline at end of file diff --git a/src/tests/benchmark/automation/ZtpAdd.js b/src/tests/benchmark/automation/ZtpAdd.js new file mode 100644 index 0000000000000000000000000000000000000000..0f649b8ccfbd910faadcc41549eddf34b83c6795 --- /dev/null +++ b/src/tests/benchmark/automation/ZtpAdd.js @@ -0,0 +1,55 @@ +import grpc from 'k6/net/grpc'; +import exec from "k6/execution"; +import { check, sleep } from 'k6'; + +const client = new grpc.Client(); +client.load(['../../../../proto'], 'automation.proto'); + +export const data = []; +for (let i = 1; i < 801; i++) { + data.push({ + "devRoleType": "DEV_CONF", + "devRoleId": { + "devId": {"device_uuid": {"uuid": "EMU-" + i}}, + "devRoleId": {"uuid": "EMU-" + i} + } + }); +}; + +export const options = { + scenarios :{ + + "ZtpAdd-scenario": { + executor: "shared-iterations", + vus: 800, + iterations: data.length, + maxDuration: "1h" + } + } +}; + +export default () => { + client.connect('10.1.255.239:5050', { + plaintext: true, + timeout: 10000 + }); + + var item = data[exec.scenario.iterationInInstance]; + const response = client.invoke('automation.AutomationService/ZtpAdd', item); + + check(response, { + 'status is OK': (r) => r && r.status === grpc.StatusOK, + }); + + console.log(JSON.stringify(response.message)); + + client.close(); + sleep(1); +}; + +export function handleSummary(data) { + + return { + 'summary_add_800.json': JSON.stringify(data.metrics.grpc_req_duration.values), //the default data object + }; +} diff --git a/src/tests/benchmark/automation/ZtpDelete.js b/src/tests/benchmark/automation/ZtpDelete.js new file mode 100644 index 0000000000000000000000000000000000000000..58af9f25d924dda254e142dcf3962b62359ff42c --- /dev/null +++ b/src/tests/benchmark/automation/ZtpDelete.js @@ -0,0 +1,55 @@ +import grpc from 'k6/net/grpc'; +import exec from "k6/execution"; +import { check, sleep } from 'k6'; + +const client = new grpc.Client(); +client.load(['../../../../proto'], 'automation.proto'); + +export const data = []; +for (let i = 1; i < 801; i++) { + data.push({ + "devRoleType": "DEV_CONF", + "devRoleId": { + "devId": {"device_uuid": {"uuid": "EMU-" + i}}, + "devRoleId": {"uuid": "EMU-" + i} + } + }); +}; + +export const options = { + scenarios :{ + + "ZtpAdd-scenario": { + executor: "shared-iterations", + vus: 800, + iterations: data.length, + maxDuration: "1h" + } + } +}; + +export default () => { + client.connect('10.1.255.232:5050', { + plaintext: true, + timeout: 10000 + }); + + var item = data[exec.scenario.iterationInInstance]; + const response = client.invoke('automation.AutomationService/ZtpDelete', item); + + check(response, { + 'status is OK': (r) => r && r.status === 
grpc.StatusOK, + }); + + console.log(JSON.stringify(response.message)); + + client.close(); + sleep(1); +}; + +export function handleSummary(data) { + + return { + 'summary_delete_800.json': JSON.stringify(data.metrics.grpc_req_duration.values), //the default data object + }; +} diff --git a/src/tests/benchmark/automation/ZtpUpdate.js b/src/tests/benchmark/automation/ZtpUpdate.js new file mode 100755 index 0000000000000000000000000000000000000000..39135ec58643339562ff87c96a03be3968c198d4 --- /dev/null +++ b/src/tests/benchmark/automation/ZtpUpdate.js @@ -0,0 +1,60 @@ +import grpc from 'k6/net/grpc'; +import exec from "k6/execution"; +import { check, sleep } from 'k6'; + +const client = new grpc.Client(); +client.load(['../../../../proto'], 'automation.proto'); + +export const data = []; +for (let i = 1; i < 801; i++) { + data.push({ + "devRole": { + "devRoleType": "DEV_CONF", + "devRoleId": { + "devId": {"device_uuid": {"uuid": "EMU-"+i}}, + "devRoleId": {"uuid": "1"} + } + }, + "devConfig": { + "config_rules": [] + } + }); +}; + +export const options = { + scenarios :{ + + "ZtpAdd-scenario": { + executor: "shared-iterations", + vus: 800, + iterations: data.length, + maxDuration: "1h" + } + } +}; + +export default () => { + client.connect('10.1.255.250:5050', { + plaintext: true, + timeout: 10000 + }); + + var item = data[exec.scenario.iterationInInstance]; + const response = client.invoke('automation.AutomationService/ZtpUpdate', item); + + check(response, { + 'status is OK': (r) => r && r.status === grpc.StatusOK, + }); + + console.log(JSON.stringify(response.message)); + + client.close(); + sleep(1); +}; + +export function handleSummary(data) { + + return { + 'summaryUpdate801.json': JSON.stringify(data.metrics.grpc_req_duration.values), //the default data object + }; +} diff --git a/src/tests/benchmark/automation/__init__.py b/src/tests/benchmark/automation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/benchmark/automation/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/benchmark/automation/run_test_01_bootstrap.sh b/src/tests/benchmark/automation/run_test_01_bootstrap.sh new file mode 100755 index 0000000000000000000000000000000000000000..dee1739270944bc19e370bb249b083f740e60737 --- /dev/null +++ b/src/tests/benchmark/automation/run_test_01_bootstrap.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/benchmark/automation/tests/test_functional_bootstrap.py diff --git a/src/tests/benchmark/automation/run_test_02_cleanup.sh b/src/tests/benchmark/automation/run_test_02_cleanup.sh new file mode 100755 index 0000000000000000000000000000000000000000..8f68302d6abfeac6750fff7183524c644355008e --- /dev/null +++ b/src/tests/benchmark/automation/run_test_02_cleanup.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/benchmark/automation/tests/test_functional_cleanup.py diff --git a/src/tests/benchmark/automation/tests/.gitignore b/src/tests/benchmark/automation/tests/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..76cb708d1b532c9b69166e55f36bcb912fd5e370 --- /dev/null +++ b/src/tests/benchmark/automation/tests/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. +Credentials.py diff --git a/src/tests/benchmark/automation/tests/Fixtures.py b/src/tests/benchmark/automation/tests/Fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..3b35a12e299ba776e909fbdd2739e971431083a6 --- /dev/null +++ b/src/tests/benchmark/automation/tests/Fixtures.py @@ -0,0 +1,28 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
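Editor's note: the ZtpAdd/ZtpDelete/ZtpUpdate k6 scripts above each synthesize one gRPC request body per emulated device (EMU-1 through EMU-800) and replay them from 800 virtual users. Below is a minimal Python sketch, not part of the patch, that rebuilds the ZtpAdd payload list so the JSON shape can be inspected offline; the field names are copied from the scripts, while build_ztp_add_payloads is a name invented here.

import json

def build_ztp_add_payloads(num_devices=800):
    # Mirrors the data loop in ZtpAdd.js: one request body per EMU-<i> device.
    payloads = []
    for i in range(1, num_devices + 1):
        payloads.append({
            'devRoleType': 'DEV_CONF',
            'devRoleId': {
                'devId'    : {'device_uuid': {'uuid': 'EMU-' + str(i)}},
                'devRoleId': {'uuid': 'EMU-' + str(i)},
            },
        })
    return payloads

if __name__ == '__main__':
    payloads = build_ztp_add_payloads()
    print(len(payloads))                      # 800, matching the k6 loop bound
    print(json.dumps(payloads[0], indent=2))  # body sent to automation.AutomationService/ZtpAdd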
+ +import pytest, logging +from common.Settings import get_setting +from tests.tools.mock_osm.Constants import WIM_PASSWORD, WIM_USERNAME +from tests.tools.mock_osm.MockOSM import MockOSM +from .Objects import WIM_MAPPING + +LOGGER = logging.getLogger(__name__) + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + LOGGER.info('WIM_MAPPING = {:s}'.format(str(WIM_MAPPING))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) diff --git a/src/tests/benchmark/automation/tests/Objects.py b/src/tests/benchmark/automation/tests/Objects.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea6f500807e3dbcc2e34dbd559614ff91c955d8 --- /dev/null +++ b/src/tests/benchmark/automation/tests/Objects.py @@ -0,0 +1,54 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import ( + json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, + json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled) +from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id +from common.tools.object_factory.Link import json_link, json_link_id +from common.tools.object_factory.Topology import json_topology, json_topology_id +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +# ----- Context -------------------------------------------------------------------------------------------------------- +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) +CONTEXT = json_context(DEFAULT_CONTEXT_UUID) + +# ----- Topology ------------------------------------------------------------------------------------------------------- +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) + +# ----- Monitoring Samples --------------------------------------------------------------------------------------------- +PACKET_PORT_SAMPLE_TYPES = [ + KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED, + KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED, + KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED, + KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, +] + +# ----- Devices -------------------------------------------------------------------------------------------------------- +DEVICE_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] +DEVICE_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_ENDPOINT_DEFS) + +# ----- Object Collections --------------------------------------------------------------------------------------------- 
+CONTEXTS = [CONTEXT] +TOPOLOGIES = [TOPOLOGY] + +DEVICES = [] +for x in range(1, 1000): + DEVICE_UUID = 'EMU-' + str(x) + DEVICE = json_device_emulated_packet_router_disabled(DEVICE_UUID) + DEVICES.append((DEVICE, DEVICE_CONNECT_RULES)) diff --git a/src/tests/benchmark/automation/tests/__init__.py b/src/tests/benchmark/automation/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/benchmark/automation/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/benchmark/automation/tests/test_functional_bootstrap.py b/src/tests/benchmark/automation/tests/test_functional_bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..3d588801511ae3bc6b5be87566c61b04bf54e467 --- /dev/null +++ b/src/tests/benchmark/automation/tests/test_functional_bootstrap.py @@ -0,0 +1,110 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
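Editor's note: Objects.py above builds the benchmark topology programmatically; DEVICES holds 999 entries (range(1, 1000) stops at EMU-999), each paired with the shared emulated connect rules. As a rough, hedged sketch of what one such entry expands to, assuming the json_device_emulated_packet_router_disabled factory yields descriptors shaped like those in descriptors_emulated.json later in this patch:

def build_emulated_device(device_uuid):
    # Approximate descriptor only; field values mirror descriptors_emulated.json.
    return {
        'device_id': {'device_uuid': {'uuid': device_uuid}},
        'device_type': 'emu-packet-router',
        'device_operational_status': 1,
        'device_drivers': [0],
        'device_endpoints': [],
        'device_config': {'config_rules': []},  # connect rules are appended by the tests
    }

DEVICES = [build_emulated_device('EMU-' + str(x)) for x in range(1, 1000)]
assert len(DEVICES) == 999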
+ +import copy, logging, pytest +from common.Settings import get_setting +from common.proto.monitoring_pb2 import KpiDescriptorList +from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from monitoring.client.MonitoringClient import MonitoringClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import Context, ContextId, Device, Empty, Topology +from device.client.DeviceClient import DeviceClient +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, TOPOLOGIES +from tests.Fixtures import context_client, device_client, monitoring_client + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +def test_scenario_empty(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure database is empty ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + +def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name + + # ----- Create Contexts and Topologies ----------------------------------------------------------------------------- + for context in CONTEXTS: + context_uuid = context['context_id']['context_uuid']['uuid'] + LOGGER.info('Adding Context {:s}'.format(context_uuid)) + response = context_client.SetContext(Context(**context)) + assert response.context_uuid.uuid == context_uuid + + for topology in TOPOLOGIES: + context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] + topology_uuid = topology['topology_id']['topology_uuid']['uuid'] + LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) + response = context_client.SetTopology(Topology(**topology)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.topology_uuid.uuid == topology_uuid + context_id = json_context_id(context_uuid) + + +def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + +def test_devices_bootstrapping( + context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + + # ----- Create Devices and Validate Collected Events --------------------------------------------------------------- + for device, connect_rules in DEVICES: + device_uuid = device['device_id']['device_uuid']['uuid'] + LOGGER.info('Adding Device {:s}'.format(device_uuid)) + + device_with_connect_rules = copy.deepcopy(device) + device_with_connect_rules['device_config']['config_rules'].extend(connect_rules) + response = device_client.AddDevice(Device(**device_with_connect_rules)) + assert response.device_uuid.uuid == device_uuid
+ + +def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure devices are created ----------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + +def test_links_created(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure links are created ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) \ No newline at end of file diff --git a/src/tests/benchmark/automation/tests/test_functional_cleanup.py b/src/tests/benchmark/automation/tests/test_functional_cleanup.py new file mode 100644 index 0000000000000000000000000000000000000000..9b6e51c3e296261e52669980f656c6fdf12ceb65 --- /dev/null +++ b/src/tests/benchmark/automation/tests/test_functional_cleanup.py @@ -0,0 +1,68 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
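Editor's note: the cleanup test that follows undoes the bootstrap in reverse dependency order, and the event-collector assertions remain commented out in this patch. A minimal standalone outline of that ordering; the client objects are illustrative stand-ins for the DeviceClient/ContextClient fixtures:

def scenario_cleanup(device_client, context_client, devices, topologies, contexts):
    # Devices go first: topologies still reference them.
    for device_id in devices:
        device_client.DeleteDevice(device_id)
    # Then topologies, which are still referenced by their contexts.
    for topology_id in topologies:
        context_client.RemoveTopology(topology_id)
    # Contexts are removed last, once they are empty.
    for context_id in contexts:
        context_client.RemoveContext(context_id)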
+ +import logging, pytest +from common.Settings import get_setting +from common.tests.EventTools import EVENT_REMOVE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, TOPOLOGIES + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_scenario_cleanup( + context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + + # ----- Delete Devices and Validate Collected Events --------------------------------------------------------------- + for device, _ in DEVICES: + device_id = device['device_id'] + device_uuid = device_id['device_uuid']['uuid'] + LOGGER.info('Deleting Device {:s}'.format(device_uuid)) + device_client.DeleteDevice(DeviceId(**device_id)) + #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) + + # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ + for topology in TOPOLOGIES: + topology_id = topology['topology_id'] + context_uuid = topology_id['context_id']['context_uuid']['uuid'] + topology_uuid = topology_id['topology_uuid']['uuid'] + LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) + context_client.RemoveTopology(TopologyId(**topology_id)) + context_id = json_context_id(context_uuid) + #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) + + # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- + for context in CONTEXTS: + context_id = context['context_id'] + context_uuid = context_id['context_uuid']['uuid'] + LOGGER.info('Deleting Context {:s}'.format(context_uuid)) + context_client.RemoveContext(ContextId(**context_id)) + #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) + + +def test_scenario_empty_again(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure database is empty again ------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 diff --git a/src/tests/benchmark/policy/.gitignore b/src/tests/benchmark/policy/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0a3f4400d5c88b1af32c7667d69d2fdc12d5424e --- /dev/null +++ b/src/tests/benchmark/policy/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. 
+descriptors_real.json diff --git a/src/tests/benchmark/policy/PolicyAddService.js b/src/tests/benchmark/policy/PolicyAddService.js new file mode 100644 index 0000000000000000000000000000000000000000..708209ba01862a169dd7007a05e2ba29a198282a --- /dev/null +++ b/src/tests/benchmark/policy/PolicyAddService.js @@ -0,0 +1,72 @@ +import grpc from 'k6/net/grpc'; +import exec from "k6/execution"; +import { check, sleep } from 'k6'; + +const client = new grpc.Client(); +client.load(['../proto'], 'policy.proto'); + +export const data = []; +for (let i = 1; i < 2; i++) { + data.push( + { + "serviceId": { + "context_id": { + "context_uuid": {"uuid": "admin"} + }, + "service_uuid": { + "uuid": "6942d780-cfa9-4dea-a946-a8a0b3f7eab2" + } + }, + "policyRuleBasic": { + "policyRuleId": {"uuid": {"uuid": i.toString()}}, + "policyRuleState": {"policyRuleState": "POLICY_UNDEFINED"}, + "priority": 0, + "conditionList": [{"kpiId": {"kpi_id": {"uuid": "1"}}, + "numericalOperator": "POLICYRULE_CONDITION_NUMERICAL_EQUAL", + "kpiValue": {"boolVal": false} + + }], + "actionList": [{}], + "booleanOperator": "POLICYRULE_CONDITION_BOOLEAN_UNDEFINED" + } + } + ); +}; + +export const options = { + scenarios :{ + + "AddPolicy-scenario": { + executor: "shared-iterations", + vus: 1, + iterations: data.length, + maxDuration: "1h" + } + } +}; + +export default () => { + client.connect('10.1.255.198:6060', { + plaintext: true, +// timeout: 10000 + }); + + var item = data[exec.scenario.iterationInInstance]; + const response = client.invoke('policy.PolicyService/PolicyAddService', item); + + check(response, { + 'status is OK': (r) => r && r.status === grpc.StatusOK, + }); + + console.log(JSON.stringify(response.message)); + + client.close(); + sleep(1); +}; + +export function handleSummary(data) { + + return { + 'summary_add_1.json': JSON.stringify(data.metrics.grpc_req_duration.values), //the default data object + }; +} diff --git a/src/tests/benchmark/policy/PolicyDelete.js b/src/tests/benchmark/policy/PolicyDelete.js new file mode 100644 index 0000000000000000000000000000000000000000..85946837eb1123bd698f907e13415b7281a779d2 --- /dev/null +++ b/src/tests/benchmark/policy/PolicyDelete.js @@ -0,0 +1,53 @@ +import grpc from 'k6/net/grpc'; +import exec from "k6/execution"; +import { check, sleep } from 'k6'; + +const client = new grpc.Client(); +client.load(['../proto'], 'policy.proto'); + +export const data = []; +for (let i = 1; i < 2; i++) { + data.push( + { + "uuid": {"uuid": i.toString()} + } + ); +}; + +export const options = { + scenarios :{ + + "AddPolicy-scenario": { + executor: "shared-iterations", + vus: 1, + iterations: data.length, + maxDuration: "1h" + } + } +}; + +export default () => { + client.connect('10.1.255.198:6060', { + plaintext: true, +// timeout: 10000 + }); + + var item = data[exec.scenario.iterationInInstance]; + const response = client.invoke('policy.PolicyService/PolicyDelete', item); + + check(response, { + 'status is OK': (r) => r && r.status === grpc.StatusOK, + }); + + console.log(JSON.stringify(response.message)); + + client.close(); + sleep(1); +}; + +export function handleSummary(data) { + + return { + 'summary_delete_1.json': JSON.stringify(data.metrics.grpc_req_duration.values), //the default data object + }; +} diff --git a/src/tests/benchmark/policy/PolicyUpdateService.js b/src/tests/benchmark/policy/PolicyUpdateService.js new file mode 100644 index 0000000000000000000000000000000000000000..a3774f9dac0dec420b88e6e236e8dcd2e698e3cd --- /dev/null +++ 
b/src/tests/benchmark/policy/PolicyUpdateService.js @@ -0,0 +1,72 @@ +import grpc from 'k6/net/grpc'; +import exec from "k6/execution"; +import { check, sleep } from 'k6'; + +const client = new grpc.Client(); +client.load(['../proto'], 'policy.proto'); + +export const data = []; +for (let i = 1; i < 2; i++) { + data.push( + { + "serviceId": { + "context_id": { + "context_uuid": {"uuid": "admin"} + }, + "service_uuid": { + "uuid": "6942d780-cfa9-4dea-a946-a8a0b3f7eab2" + } + }, + "policyRuleBasic": { + "policyRuleId": {"uuid": {"uuid": i.toString()}}, + "policyRuleState": {"policyRuleState": "POLICY_UNDEFINED"}, + "priority": 0, + "conditionList": [{"kpiId": {"kpi_id": {"uuid": "1"}}, + "numericalOperator": "POLICYRULE_CONDITION_NUMERICAL_EQUAL", + "kpiValue": {"boolVal": false} + + }], + "actionList": [{}], + "booleanOperator": "POLICYRULE_CONDITION_BOOLEAN_UNDEFINED" + } + } + ); +}; + +export const options = { + scenarios :{ + + "AddPolicy-scenario": { + executor: "shared-iterations", + vus: 1, + iterations: data.length, + maxDuration: "1h" + } + } +}; + +export default () => { + client.connect('10.1.255.198:6060', { + plaintext: true, +// timeout: 10000 + }); + + var item = data[exec.scenario.iterationInInstance]; + const response = client.invoke('policy.PolicyService/PolicyUpdateService', item); + + check(response, { + 'status is OK': (r) => r && r.status === grpc.StatusOK, + }); + + console.log(JSON.stringify(response.message)); + + client.close(); + sleep(1); +}; + +export function handleSummary(data) { + + return { + 'summary_add_1.json': JSON.stringify(data.metrics.grpc_req_duration.values), //the default data object + }; +} diff --git a/src/tests/benchmark/policy/README.md b/src/tests/benchmark/policy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8b5b2a01efc357b5d8eca6a6890b051b4ffac260 --- /dev/null +++ b/src/tests/benchmark/policy/README.md @@ -0,0 +1,17 @@ + +# Grafana k6 load testing tool + +# K6 Installation Instructions on Ubuntu + +sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 +echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list +sudo apt-get update +sudo apt-get install k6 + +Or install k6 via snap: + +sudo apt install snapd +sudo snap install k6 + +# Running K6 +k6 run script.js \ No newline at end of file diff --git a/src/tests/benchmark/policy/__init__.py b/src/tests/benchmark/policy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/benchmark/policy/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
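Editor's note: the PolicyAddService/PolicyDelete/PolicyUpdateService k6 scripts above each send a single policyRuleBasic payload against a hard-coded service UUID. Below is a minimal Python sketch, not part of the patch, reproducing the PolicyAddService request body for offline inspection; the structure is copied verbatim from the scripts, while build_policy_payload is a name invented here.

import json

def build_policy_payload(rule_index):
    # Mirrors the data loop in PolicyAddService.js / PolicyUpdateService.js.
    return {
        'serviceId': {
            'context_id': {'context_uuid': {'uuid': 'admin'}},
            'service_uuid': {'uuid': '6942d780-cfa9-4dea-a946-a8a0b3f7eab2'},
        },
        'policyRuleBasic': {
            'policyRuleId': {'uuid': {'uuid': str(rule_index)}},
            'policyRuleState': {'policyRuleState': 'POLICY_UNDEFINED'},
            'priority': 0,
            'conditionList': [{
                'kpiId': {'kpi_id': {'uuid': '1'}},
                'numericalOperator': 'POLICYRULE_CONDITION_NUMERICAL_EQUAL',
                'kpiValue': {'boolVal': False},
            }],
            'actionList': [{}],
            'booleanOperator': 'POLICYRULE_CONDITION_BOOLEAN_UNDEFINED',
        },
    }

if __name__ == '__main__':
    print(json.dumps(build_policy_payload(1), indent=2))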
+ diff --git a/src/tests/benchmark/policy/deploy_specs.sh b/src/tests/benchmark/policy/deploy_specs.sh new file mode 100644 index 0000000000000000000000000000000000000000..ffd91da35186fe21f418950493ef797a9af1b522 --- /dev/null +++ b/src/tests/benchmark/policy/deploy_specs.sh @@ -0,0 +1,26 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +# Supported components are: +# context device automation policy service compute monitoring webui +# interdomain slice pathcomp dlt +# dbscanserving opticalattackmitigator opticalattackdetector +# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector +export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} diff --git a/src/tests/benchmark/policy/descriptors_emulated.json b/src/tests/benchmark/policy/descriptors_emulated.json new file mode 100644 index 0000000000000000000000000000000000000000..a71d454f41f324cabb48a023d6d840a59245800c --- /dev/null +++ b/src/tests/benchmark/policy/descriptors_emulated.json @@ -0,0 +1,121 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, + "device_ids": [ + {"device_uuid": {"uuid": "R1-EMU"}}, + {"device_uuid": {"uuid": "R2-EMU"}}, + {"device_uuid": {"uuid": "R3-EMU"}}, + {"device_uuid": {"uuid": "R4-EMU"}}, + {"device_uuid": {"uuid": "O1-OLS"}} + ], + "link_ids": [ + {"link_uuid": {"uuid": "R1-EMU/13/0/0==O1-OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, + {"link_uuid": {"uuid": "R2-EMU/13/0/0==O1-OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, + {"link_uuid": {"uuid": "R3-EMU/13/0/0==O1-OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, + {"link_uuid": {"uuid": "R4-EMU/13/0/0==O1-OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, 
"custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "device_type": "emu-open-line-system", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "type": "optical", "sample_types": []}, + {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical", "sample_types": []}, + {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical", "sample_types": []}, + {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical", "sample_types": []} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1-EMU/13/0/0==O1-OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2-EMU/13/0/0==O1-OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3-EMU/13/0/0==O1-OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": 
{"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4-EMU/13/0/0==O1-OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "endpoint_uuid": {"uuid": "13/0/0"}}, + {"device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "endpoint_uuid": {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}} + ] + } + ] +} \ No newline at end of file diff --git a/src/tests/benchmark/policy/run_test_01_bootstrap.sh b/src/tests/benchmark/policy/run_test_01_bootstrap.sh new file mode 100755 index 0000000000000000000000000000000000000000..10b18257b937a5aae82a66cd5e3df83abd44e1d8 --- /dev/null +++ b/src/tests/benchmark/policy/run_test_01_bootstrap.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/benchmark/policy/tests/test_functional_bootstrap.py diff --git a/src/tests/benchmark/policy/run_test_02_create_service.sh b/src/tests/benchmark/policy/run_test_02_create_service.sh new file mode 100755 index 0000000000000000000000000000000000000000..69ef34ff954d550fbe2c22719f0afb2eb3360525 --- /dev/null +++ b/src/tests/benchmark/policy/run_test_02_create_service.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/benchmark/policy/tests/test_functional_create_service.py diff --git a/src/tests/benchmark/policy/run_test_03_delete_service.sh b/src/tests/benchmark/policy/run_test_03_delete_service.sh new file mode 100755 index 0000000000000000000000000000000000000000..01eb521310053b06538c91cbfaae80aa3b2fdd45 --- /dev/null +++ b/src/tests/benchmark/policy/run_test_03_delete_service.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/benchmark/policy/tests/test_functional_delete_service.py diff --git a/src/tests/benchmark/policy/run_test_04_cleanup.sh b/src/tests/benchmark/policy/run_test_04_cleanup.sh new file mode 100755 index 0000000000000000000000000000000000000000..a2be265de04552cbebe83decba538656232bf904 --- /dev/null +++ b/src/tests/benchmark/policy/run_test_04_cleanup.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/benchmark/policy/tests/test_functional_cleanup.py diff --git a/src/tests/benchmark/policy/tests/.gitignore b/src/tests/benchmark/policy/tests/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..76cb708d1b532c9b69166e55f36bcb912fd5e370 --- /dev/null +++ b/src/tests/benchmark/policy/tests/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. +Credentials.py diff --git a/src/tests/benchmark/policy/tests/Fixtures.py b/src/tests/benchmark/policy/tests/Fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..3b35a12e299ba776e909fbdd2739e971431083a6 --- /dev/null +++ b/src/tests/benchmark/policy/tests/Fixtures.py @@ -0,0 +1,28 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
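+#
+# The session-scoped osm_wim fixture below builds a MockOSM client pointing at the
+# Compute service; its host and port are resolved from the COMPUTESERVICE_SERVICE_*
+# environment settings via get_setting().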
+ +import pytest, logging +from common.Settings import get_setting +from tests.tools.mock_osm.Constants import WIM_PASSWORD, WIM_USERNAME +from tests.tools.mock_osm.MockOSM import MockOSM +from .Objects import WIM_MAPPING + +LOGGER = logging.getLogger(__name__) + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + LOGGER.info('WIM_MAPPING = {:s}'.format(str(WIM_MAPPING))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) diff --git a/src/tests/benchmark/policy/tests/Objects.py b/src/tests/benchmark/policy/tests/Objects.py new file mode 100644 index 0000000000000000000000000000000000000000..7bfbe9fce558d6a86d965ecb6421369d7f544d4d --- /dev/null +++ b/src/tests/benchmark/policy/tests/Objects.py @@ -0,0 +1,38 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id +from tests.tools.mock_osm.Tools import connection_point, wim_mapping + +# ----- WIM Service Settings ------------------------------------------------------------------------------------------- + +WIM_DC1_SITE_ID = '1' +WIM_DC1_DEVICE_ID = json_device_id('R1-EMU') +WIM_DC1_ENDPOINT_ID = json_endpoint_id(WIM_DC1_DEVICE_ID, '13/1/2') + +WIM_DC2_SITE_ID = '2' +WIM_DC2_DEVICE_ID = json_device_id('R3-EMU') +WIM_DC2_ENDPOINT_ID = json_endpoint_id(WIM_DC2_DEVICE_ID, '13/1/2') + +WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(WIM_DC1_SITE_ID, WIM_DC1_ENDPOINT_ID) +WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(WIM_DC2_SITE_ID, WIM_DC2_ENDPOINT_ID) +WIM_MAPPING = [WIM_MAP_DC1, WIM_MAP_DC2] + +WIM_SRV_VLAN_ID = 300 +WIM_SERVICE_TYPE = 'ELINE' +WIM_SERVICE_CONNECTION_POINTS = [ + connection_point(WIM_SEP_DC1, 'dot1q', WIM_SRV_VLAN_ID), + connection_point(WIM_SEP_DC2, 'dot1q', WIM_SRV_VLAN_ID), +] diff --git a/src/tests/benchmark/policy/tests/__init__.py b/src/tests/benchmark/policy/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/benchmark/policy/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/tests/benchmark/policy/tests/test_functional_bootstrap.py b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..71deb9d596b1494e148b140902ca927e5d664dd3 --- /dev/null +++ b/src/tests/benchmark/policy/tests/test_functional_bootstrap.py @@ -0,0 +1,95 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, time +from common.proto.context_pb2 import ContextId, Empty +from common.proto.monitoring_pb2 import KpiDescriptorList +from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from monitoring.client.MonitoringClient import MonitoringClient +from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' + +def test_scenario_bootstrap( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # ----- List entities - Ensure database is empty ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 + + + # ----- Load Scenario ---------------------------------------------------------------------------------------------- + descriptor_loader = load_scenario_from_descriptor( + DESCRIPTOR_FILE, context_client, device_client, None, None) + + + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == descriptor_loader.num_contexts + + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == descriptor_loader.num_devices + + response = context_client.ListLinks(Empty()) + assert len(response.links) == descriptor_loader.num_links + + for context_uuid, _ in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 + +def test_scenario_kpis_created( + context_client : ContextClient, # pylint: disable=redefined-outer-name + monitoring_client: MonitoringClient, # pylint: 
disable=redefined-outer-name
+) -> None:
+    """
+    This test validates that KPIs related to the service/device/endpoint were created
+    during the scenario bootstrap process.
+    """
+    response = context_client.ListDevices(Empty())
+    kpis_expected = set()
+    for device in response.devices:
+        device_uuid = device.device_id.device_uuid.uuid
+        for endpoint in device.device_endpoints:
+            endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+            for kpi_sample_type in endpoint.kpi_sample_types:
+                kpis_expected.add((device_uuid, endpoint_uuid, kpi_sample_type))
+    num_kpis_expected = len(kpis_expected)
+    LOGGER.info('Num KPIs expected: {:d}'.format(num_kpis_expected))
+
+    num_kpis_created, num_retry = 0, 0
+    while (num_kpis_created != num_kpis_expected) and (num_retry < 5):
+        response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty())
+        num_kpis_created = len(response.kpi_descriptor_list)
+        LOGGER.info('Num KPIs created: {:d}'.format(num_kpis_created))
+        time.sleep(0.5)
+        num_retry += 1
+    assert num_kpis_created == num_kpis_expected
diff --git a/src/tests/benchmark/policy/tests/test_functional_cleanup.py b/src/tests/benchmark/policy/tests/test_functional_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..be807eaa0242f2363b5b6c189ce4de264528a54c
--- /dev/null
+++ b/src/tests/benchmark/policy/tests/test_functional_cleanup.py
@@ -0,0 +1,80 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
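+#
+# This module verifies that no services remain, then removes the links, devices,
+# topologies, and contexts loaded from the descriptor file and checks that the
+# Context database is empty again.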
+ +import logging +from common.tools.descriptor.Loader import DescriptorLoader +from common.tools.object_factory.Context import json_context_id +from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' + + +def test_services_removed( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # ----- List entities - Ensure service is removed ------------------------------------------------------------------ + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == descriptor_loader.num_contexts + + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == descriptor_loader.num_devices + + response = context_client.ListLinks(Empty()) + assert len(response.links) == descriptor_loader.num_links + + for context_uuid, _ in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 + + + # ----- Delete Links, Devices, Topologies, Contexts ---------------------------------------------------------------- + for link in descriptor_loader.links: + context_client.RemoveLink(LinkId(**link['link_id'])) + + for device in descriptor_loader.devices: + device_client .DeleteDevice(DeviceId(**device['device_id'])) + + for context_uuid, topology_list in descriptor_loader.topologies.items(): + for topology in topology_list: + context_client.RemoveTopology(TopologyId(**topology['topology_id'])) + + for context in descriptor_loader.contexts: + context_client.RemoveContext(ContextId(**context['context_id'])) + + + # ----- List entities - Ensure database is empty again ------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 diff --git a/src/tests/benchmark/policy/tests/test_functional_create_service.py b/src/tests/benchmark/policy/tests/test_functional_create_service.py new file mode 100644 index 0000000000000000000000000000000000000000..e606d060d52631ba72e191d7c025bd7b43048b39 --- /dev/null +++ b/src/tests/benchmark/policy/tests/test_functional_create_service.py @@ -0,0 +1,124 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, random +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ContextId, Empty +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.tools.descriptor.Loader import DescriptorLoader +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from monitoring.client.MonitoringClient import MonitoringClient +from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.tools.mock_osm.MockOSM import MockOSM +from .Fixtures import osm_wim # pylint: disable=unused-import +from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value +DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value + +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' + +def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == descriptor_loader.num_contexts + + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == descriptor_loader.num_devices + + response = context_client.ListLinks(Empty()) + assert len(response.links) == descriptor_loader.num_links + + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 + + + # ----- Create Service --------------------------------------------------------------------------------------------- + service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) + osm_wim.get_connectivity_service_status(service_uuid) + + + # ----- List entities - Ensure service is created ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == descriptor_loader.num_contexts + + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == descriptor_loader.num_devices + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 
descriptor_loader.num_links
+
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+        assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI)
+
+        for service in response.services:
+            service_id = service.service_id
+            response = context_client.ListConnections(service_id)
+            LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+                grpc_message_to_json_string(service_id), len(response.connections),
+                grpc_message_to_json_string(response)))
+            assert len(response.connections) == 1 # one connection per service
+
+
+def test_scenario_kpi_values_created(
+    monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name
+) -> None:
+    """
+    This test validates that KPI values have been inserted into the monitoring database.
+    We randomly select k KPI descriptors to test.
+    """
+    response = monitoring_client.GetKpiDescriptorList(Empty())
+    kpi_descriptors = random.choices(response.kpi_descriptor_list, k=2)
+
+    for kpi_descriptor in kpi_descriptors:
+        MSG = 'KPI(kpi_uuid={:s}, device_uuid={:s}, endpoint_uuid={:s}, service_uuid={:s}, kpi_sample_type={:s})...'
+        LOGGER.info(MSG.format(
+            str(kpi_descriptor.kpi_id.kpi_id.uuid), str(kpi_descriptor.device_id.device_uuid.uuid),
+            str(kpi_descriptor.endpoint_id.endpoint_uuid.uuid), str(kpi_descriptor.service_id.service_uuid.uuid),
+            str(KpiSampleType.Name(kpi_descriptor.kpi_sample_type))))
+        response = monitoring_client.GetInstantKpi(kpi_descriptor.kpi_id)
+        kpi_uuid = response.kpi_id.kpi_id.uuid
+        assert kpi_uuid == kpi_descriptor.kpi_id.kpi_id.uuid
+        kpi_value_type = response.kpi_value.WhichOneof('value')
+        if kpi_value_type is None:
+            MSG = ' KPI({:s}): No instant value found'
+            LOGGER.warning(MSG.format(str(kpi_uuid)))
+        else:
+            kpi_timestamp = response.timestamp.timestamp
+            assert kpi_timestamp > 0
+            assert kpi_value_type == 'floatVal'
+            kpi_value = getattr(response.kpi_value, kpi_value_type)
+            MSG = ' KPI({:s}): timestamp={:s} value_type={:s} value={:s}'
+            LOGGER.info(MSG.format(str(kpi_uuid), str(kpi_timestamp), str(kpi_value_type), str(kpi_value)))
diff --git a/src/tests/benchmark/policy/tests/test_functional_delete_service.py b/src/tests/benchmark/policy/tests/test_functional_delete_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f8d088012bed164e4603a813bfe9154eda8f568
--- /dev/null
+++ b/src/tests/benchmark/policy/tests/test_functional_delete_service.py
@@ -0,0 +1,99 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
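+#
+# This module locates the L3NM service created by the previous test, requests its
+# removal through the MockOSM client, and verifies that the scenario topology
+# (devices and links) is left intact.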
+ +import logging +from common.Constants import DEFAULT_CONTEXT_UUID +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader +from common.tools.object_factory.Context import json_context_id +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from tests.Fixtures import context_client # pylint: disable=unused-import +from tests.tools.mock_osm.MockOSM import MockOSM +from .Fixtures import osm_wim # pylint: disable=unused-import + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value +DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value + +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' + + +def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is created ------------------------------------------------------------------ + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == descriptor_loader.num_contexts + + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == descriptor_loader.num_devices + + response = context_client.ListLinks(Empty()) + assert len(response.links) == descriptor_loader.num_links + + l3nm_service_uuids = set() + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) + for service in response.services: + service_id = service.service_id + + if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + service_uuid = service_id.service_uuid.uuid + l3nm_service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} + + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # one connection per service + + # Identify service to delete + assert len(l3nm_service_uuids) == 1 # assume a single L3NM service has been created + l3nm_service_uuid = set(l3nm_service_uuids).pop() + + + # ----- Delete Service --------------------------------------------------------------------------------------------- + osm_wim.delete_connectivity_service(l3nm_service_uuid) + + + # ----- List entities - Ensure service is removed ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == descriptor_loader.num_contexts + + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == descriptor_loader.num_devices + + response = context_client.ListLinks(Empty()) 
+ assert len(response.links) == descriptor_loader.num_links + + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 diff --git a/src/tests/ecoc22/run_tests_and_coverage.sh b/src/tests/ecoc22/run_tests_and_coverage.sh index 835867896020f2b94e0797bdf60c85af2228eda2..4517cc1ea7eec7027219517720c99bfea3b4250b 100755 --- a/src/tests/ecoc22/run_tests_and_coverage.sh +++ b/src/tests/ecoc22/run_tests_and_coverage.sh @@ -16,7 +16,6 @@ PROJECTDIR=`pwd` -cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc COVERAGEFILE=$PROJECTDIR/coverage/.coverage @@ -26,18 +25,20 @@ cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PRO # Destroy old coverage file rm -f $COVERAGEFILE +source tfs_runtime_env_vars.sh + # Force a flush of Context database kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL # Run functional tests and analyze code coverage at the same time coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_bootstrap.py + src/tests/ecoc22/tests/test_functional_bootstrap.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_create_service.py + src/tests/ecoc22/tests/test_functional_create_service.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_delete_service.py + src/tests/ecoc22/tests/test_functional_delete_service.py coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_cleanup.py + src/tests/ecoc22/tests/test_functional_cleanup.py diff --git a/src/tests/nfvsdn22 b/src/tests/nfvsdn22 new file mode 120000 index 0000000000000000000000000000000000000000..e8122da56327bf631c751cbe38ce6b37d3dc7378 --- /dev/null +++ b/src/tests/nfvsdn22 @@ -0,0 +1 @@ +./scenario2 \ No newline at end of file diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh index 8afd683843d4882e75c3cbca8363aa3d63edda7f..ffd91da35186fe21f418950493ef797a9af1b522 100644 --- a/src/tests/ofc22/deploy_specs.sh +++ b/src/tests/ofc22/deploy_specs.sh @@ -2,6 +2,11 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. +# Supported components are: +# context device automation policy service compute monitoring webui +# interdomain slice pathcomp dlt +# dbscanserving opticalattackmitigator opticalattackdetector +# l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui" # Set the tag you want to use for your images. @@ -13,5 +18,9 @@ export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" -# Set the neew Grafana admin password +# Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" + +# If not already set, disable skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. 
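+# Example: run "TFS_SKIP_BUILD=YES ./deploy.sh" (or export the variable before
+# sourcing this file) to reuse previously built images.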
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""} diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json index 83f9c39e2ac7154b088ccdd0a1519ea32c1aee1d..a71d454f41f324cabb48a023d6d840a59245800c 100644 --- a/src/tests/ofc22/descriptors_emulated.json +++ b/src/tests/ofc22/descriptors_emulated.json @@ -9,70 +9,83 @@ "topologies": [ { "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}}, - "device_ids": [], - "link_ids": [] + "device_ids": [ + {"device_uuid": {"uuid": "R1-EMU"}}, + {"device_uuid": {"uuid": "R2-EMU"}}, + {"device_uuid": {"uuid": "R3-EMU"}}, + {"device_uuid": {"uuid": "R4-EMU"}}, + {"device_uuid": {"uuid": "O1-OLS"}} + ], + "link_ids": [ + {"link_uuid": {"uuid": "R1-EMU/13/0/0==O1-OLS/aade6001-f00b-5e2f-a357-6a0a9d3de870"}}, + {"link_uuid": {"uuid": "R2-EMU/13/0/0==O1-OLS/eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}}, + {"link_uuid": {"uuid": "R3-EMU/13/0/0==O1-OLS/0ef74f99-1acc-57bd-ab9d-4b958b06c513"}}, + {"link_uuid": {"uuid": "R4-EMU/13/0/0==O1-OLS/50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"}} + ] } ], "devices": [ { - "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R1-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R2-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R3-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": 
{"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, - "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "R4-EMU"}}, "device_type": "emu-packet-router", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"13/0/0\"}, {\"sample_types\": [101, 102, 201, 202], \"type\": \"copper\", \"uuid\": \"13/1/2\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "13/0/0", "type": "optical", "sample_types": []}, + {"uuid": "13/1/2", "type": "copper", "sample_types": [101, 102, 201, 202]} + ]}}} + ]} }, { - "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, - "device_type": "emu-open-line-system", + "device_id": {"device_uuid": {"uuid": "O1-OLS"}}, "device_type": "emu-open-line-system", + "device_operational_status": 1, "device_drivers": [0], "device_endpoints": [], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"aade6001-f00b-5e2f-a357-6a0a9d3de870\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"eb287d83-f05e-53ec-ab5a-adf6bd2b5418\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"0ef74f99-1acc-57bd-ab9d-4b958b06c513\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"50296d99-58cc-5ce7-82f5-fc8ee4eec2ec\"}]}"}} - ]}, - "device_operational_status": 1, - "device_drivers": [0], - "device_endpoints": [] + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "type": "optical", "sample_types": []}, + {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical", "sample_types": []}, + {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical", "sample_types": []}, + {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical", "sample_types": []} + ]}}} + ]} } ], "links": [ diff --git a/src/tests/ofc22/run_test_01_bootstrap.sh b/src/tests/ofc22/run_test_01_bootstrap.sh index 
bb740707321b24fc960299f2eac91cc2d9775b64..61b49b251f927ffb2e845f0c9094d30ea597abc6 100755 --- a/src/tests/ofc22/run_test_01_bootstrap.sh +++ b/src/tests/ofc22/run_test_01_bootstrap.sh @@ -13,9 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -# make sure to source the following scripts: -# - my_deploy.sh -# - tfs_runtime_env_vars.sh - source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_bootstrap.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_bootstrap.py diff --git a/src/tests/ofc22/run_test_02_create_service.sh b/src/tests/ofc22/run_test_02_create_service.sh index 8b6c8658df759bdcb777f83c6c7846d0ea7b48ed..135a3f74fe93d0d7a4da6ef0e02371a040fc1eb3 100755 --- a/src/tests/ofc22/run_test_02_create_service.sh +++ b/src/tests/ofc22/run_test_02_create_service.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_create_service.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_create_service.py diff --git a/src/tests/ofc22/run_test_03_delete_service.sh b/src/tests/ofc22/run_test_03_delete_service.sh index 51df41aee216e141b0d2e2f55a0398ecd9cdf35f..cbe6714fe91cf1758f62e697e667568d35578181 100755 --- a/src/tests/ofc22/run_test_03_delete_service.sh +++ b/src/tests/ofc22/run_test_03_delete_service.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_delete_service.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_delete_service.py diff --git a/src/tests/ofc22/run_test_04_cleanup.sh b/src/tests/ofc22/run_test_04_cleanup.sh index 2ba91684f9eb49075dd68877e54976f989811ae9..e88ddbd3227b3f29dfc7f126d5853e0b1d0e06f1 100755 --- a/src/tests/ofc22/run_test_04_cleanup.sh +++ b/src/tests/ofc22/run_test_04_cleanup.sh @@ -14,4 +14,4 @@ # limitations under the License. 
source tfs_runtime_env_vars.sh -pytest --verbose src/tests/ofc22/tests/test_functional_cleanup.py +pytest --verbose --log-level=INFO -o log_cli=true -o log_cli_level=INFO src/tests/ofc22/tests/test_functional_cleanup.py diff --git a/src/tests/ofc22/run_tests_and_coverage.sh b/src/tests/ofc22/run_tests.sh similarity index 62% rename from src/tests/ofc22/run_tests_and_coverage.sh rename to src/tests/ofc22/run_tests.sh index bafc920c71a640d083497e1cd6ae025d0ea7cef5..0ad4be313987b8b5069808873f94840521d4284e 100755 --- a/src/tests/ofc22/run_tests_and_coverage.sh +++ b/src/tests/ofc22/run_tests.sh @@ -16,30 +16,29 @@ PROJECTDIR=`pwd` -cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc COVERAGEFILE=$PROJECTDIR/coverage/.coverage # Configure the correct folder on the .coveragerc file -cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE +cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR/src+g > $RCFILE # Destroy old coverage file rm -f $COVERAGEFILE +source tfs_runtime_env_vars.sh + # Force a flush of Context database kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL -source tfs_runtime_env_vars.sh - -# Run functional tests and analyze code coverage at the same time -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_bootstrap.py +# Run functional tests +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_bootstrap.py -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_create_service.py +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_create_service.py -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_delete_service.py +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_delete_service.py -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ofc22/tests/test_functional_cleanup.py +pytest --log-level=INFO --verbose \ + src/tests/ofc22/tests/test_functional_cleanup.py diff --git a/src/tests/ofc22/setup_test_env.sh b/src/tests/ofc22/setup_test_env.sh deleted file mode 100755 index 1f8b0a5a7a8dc986715c6f54a62151f6afa4ad80..0000000000000000000000000000000000000000 --- a/src/tests/ofc22/setup_test_env.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get service/contextservice --namespace tfs --template '{{.spec.clusterIP}}') -export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service/contextservice --namespace tfs -o jsonpath='{.spec.ports[?(@.name=="grpc")].port}') -export COMPUTESERVICE_SERVICE_HOST=$(kubectl get service/computeservice --namespace tfs --template '{{.spec.clusterIP}}') -export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service/computeservice --namespace tfs -o jsonpath='{.spec.ports[?(@.name=="http")].port}') -echo "CONTEXTSERVICE_SERVICE_HOST=$CONTEXTSERVICE_SERVICE_HOST" -echo "CONTEXTSERVICE_SERVICE_PORT_GRPC=$CONTEXTSERVICE_SERVICE_PORT_GRPC" -echo "COMPUTESERVICE_SERVICE_HOST=$COMPUTESERVICE_SERVICE_HOST" -echo "COMPUTESERVICE_SERVICE_PORT_HTTP=$COMPUTESERVICE_SERVICE_PORT_HTTP" diff --git a/src/tests/ofc22/tests/Fixtures.py b/src/tests/ofc22/tests/Fixtures.py index 370731e5de14b2c7c4acdcfa86eacfa66f2ffd4b..3b35a12e299ba776e909fbdd2739e971431083a6 100644 --- 
a/src/tests/ofc22/tests/Fixtures.py +++ b/src/tests/ofc22/tests/Fixtures.py @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest +import pytest, logging from common.Settings import get_setting -from compute.tests.mock_osm.MockOSM import MockOSM -from .Objects import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME +from tests.tools.mock_osm.Constants import WIM_PASSWORD, WIM_USERNAME +from tests.tools.mock_osm.MockOSM import MockOSM +from .Objects import WIM_MAPPING +LOGGER = logging.getLogger(__name__) @pytest.fixture(scope='session') def osm_wim(): wim_url = 'http://{:s}:{:s}'.format( get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + LOGGER.info('WIM_MAPPING = {:s}'.format(str(WIM_MAPPING))) return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) diff --git a/src/tests/ofc22/tests/Objects.py b/src/tests/ofc22/tests/Objects.py index d2fb32ebb20b7bcdda9ac12b7a7390c46e6fb1d1..7bfbe9fce558d6a86d965ecb6421369d7f544d4d 100644 --- a/src/tests/ofc22/tests/Objects.py +++ b/src/tests/ofc22/tests/Objects.py @@ -12,220 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Tuple -from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID -from common.tools.object_factory.Context import json_context, json_context_id -from common.tools.object_factory.Device import ( - json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, - json_device_emulated_tapi_disabled, json_device_id, json_device_packetrouter_disabled, json_device_tapi_disabled) -from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id -from common.tools.object_factory.Link import json_link, json_link_id -from common.tools.object_factory.Topology import json_topology, json_topology_id -from common.proto.kpi_sample_types_pb2 import KpiSampleType - -# ----- Context -------------------------------------------------------------------------------------------------------- -CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) -CONTEXT = json_context(DEFAULT_CONTEXT_UUID) - -# ----- Topology ------------------------------------------------------------------------------------------------------- -TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) -TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) - -# ----- Monitoring Samples --------------------------------------------------------------------------------------------- -PACKET_PORT_SAMPLE_TYPES = [ - KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED, - KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED, - KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED, - KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, -] - -# ----- Device Credentials and Settings -------------------------------------------------------------------------------- -try: - from .Credentials import DEVICE_R1_ADDRESS, DEVICE_R1_PORT, DEVICE_R1_USERNAME, DEVICE_R1_PASSWORD - from .Credentials import DEVICE_R3_ADDRESS, DEVICE_R3_PORT, DEVICE_R3_USERNAME, DEVICE_R3_PASSWORD - from .Credentials import DEVICE_O1_ADDRESS, DEVICE_O1_PORT - USE_REAL_DEVICES = True # Use real devices -except ImportError: - USE_REAL_DEVICES = False # Use emulated devices - - DEVICE_R1_ADDRESS = '0.0.0.0' - DEVICE_R1_PORT = 830 - DEVICE_R1_USERNAME = 'admin' - DEVICE_R1_PASSWORD = 'admin' - - DEVICE_R3_ADDRESS = '0.0.0.0' - 
DEVICE_R3_PORT = 830 - DEVICE_R3_USERNAME = 'admin' - DEVICE_R3_PASSWORD = 'admin' - - DEVICE_O1_ADDRESS = '0.0.0.0' - DEVICE_O1_PORT = 4900 - -#USE_REAL_DEVICES = False # Uncomment to force to use emulated devices - -def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): - return [ - json_endpoint_id(device_id, ep_uuid, topology_id=None) - for ep_uuid, _, _ in endpoint_descriptors - ] - -def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): - return [ - json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types) - for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors - ] - -def get_link_uuid(a_device_id : Dict, a_endpoint_id : Dict, z_device_id : Dict, z_endpoint_id : Dict) -> str: - return '{:s}/{:s}=={:s}/{:s}'.format( - a_device_id['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], - z_device_id['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) - - -# ----- Devices -------------------------------------------------------------------------------------------------------- -if not USE_REAL_DEVICES: - json_device_packetrouter_disabled = json_device_emulated_packet_router_disabled - json_device_tapi_disabled = json_device_emulated_tapi_disabled - -DEVICE_R1_UUID = 'R1-EMU' -DEVICE_R1_TIMEOUT = 120 -DEVICE_R1_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R1_ID = json_device_id(DEVICE_R1_UUID) -#DEVICE_R1_ENDPOINTS = json_endpoints(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS) -DEVICE_R1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R1_ID, DEVICE_R1_ENDPOINT_DEFS) -DEVICE_R1 = json_device_packetrouter_disabled(DEVICE_R1_UUID) -ENDPOINT_ID_R1_13_0_0 = DEVICE_R1_ENDPOINT_IDS[0] -ENDPOINT_ID_R1_13_1_2 = DEVICE_R1_ENDPOINT_IDS[1] -DEVICE_R1_CONNECT_RULES = json_device_connect_rules(DEVICE_R1_ADDRESS, DEVICE_R1_PORT, { - 'username': DEVICE_R1_USERNAME, - 'password': DEVICE_R1_PASSWORD, - 'timeout' : DEVICE_R1_TIMEOUT, -}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R1_ENDPOINT_DEFS) - - -DEVICE_R2_UUID = 'R2-EMU' -DEVICE_R2_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R2_ID = json_device_id(DEVICE_R2_UUID) -#DEVICE_R2_ENDPOINTS = json_endpoints(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS) -DEVICE_R2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R2_ID, DEVICE_R2_ENDPOINT_DEFS) -DEVICE_R2 = json_device_emulated_packet_router_disabled(DEVICE_R2_UUID) -ENDPOINT_ID_R2_13_0_0 = DEVICE_R2_ENDPOINT_IDS[0] -ENDPOINT_ID_R2_13_1_2 = DEVICE_R2_ENDPOINT_IDS[1] -DEVICE_R2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R2_ENDPOINT_DEFS) - - -DEVICE_R3_UUID = 'R3-EMU' -DEVICE_R3_TIMEOUT = 120 -DEVICE_R3_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R3_ID = json_device_id(DEVICE_R3_UUID) -#DEVICE_R3_ENDPOINTS = json_endpoints(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS) -DEVICE_R3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R3_ID, DEVICE_R3_ENDPOINT_DEFS) -DEVICE_R3 = json_device_packetrouter_disabled(DEVICE_R3_UUID) -ENDPOINT_ID_R3_13_0_0 = DEVICE_R3_ENDPOINT_IDS[0] -ENDPOINT_ID_R3_13_1_2 = DEVICE_R3_ENDPOINT_IDS[1] -DEVICE_R3_CONNECT_RULES = json_device_connect_rules(DEVICE_R3_ADDRESS, DEVICE_R3_PORT, { - 'username': DEVICE_R3_USERNAME, - 'password': DEVICE_R3_PASSWORD, - 'timeout' : DEVICE_R3_TIMEOUT, -}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_R3_ENDPOINT_DEFS) - - 
-DEVICE_R4_UUID = 'R4-EMU' -DEVICE_R4_ENDPOINT_DEFS = [('13/0/0', 'optical', []), ('13/1/2', 'copper', PACKET_PORT_SAMPLE_TYPES)] -DEVICE_R4_ID = json_device_id(DEVICE_R4_UUID) -#DEVICE_R4_ENDPOINTS = json_endpoints(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS) -DEVICE_R4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_R4_ID, DEVICE_R4_ENDPOINT_DEFS) -DEVICE_R4 = json_device_emulated_packet_router_disabled(DEVICE_R4_UUID) -ENDPOINT_ID_R4_13_0_0 = DEVICE_R4_ENDPOINT_IDS[0] -ENDPOINT_ID_R4_13_1_2 = DEVICE_R4_ENDPOINT_IDS[1] -DEVICE_R4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_R4_ENDPOINT_DEFS) - - -DEVICE_O1_UUID = 'O1-OLS' -DEVICE_O1_TIMEOUT = 120 -DEVICE_O1_ENDPOINT_DEFS = [ - ('aade6001-f00b-5e2f-a357-6a0a9d3de870', 'optical', []), # node_1_port_13 - ('eb287d83-f05e-53ec-ab5a-adf6bd2b5418', 'optical', []), # node_2_port_13 - ('0ef74f99-1acc-57bd-ab9d-4b958b06c513', 'optical', []), # node_3_port_13 - ('50296d99-58cc-5ce7-82f5-fc8ee4eec2ec', 'optical', []), # node_4_port_13 -] -DEVICE_O1_ID = json_device_id(DEVICE_O1_UUID) -DEVICE_O1 = json_device_tapi_disabled(DEVICE_O1_UUID) -#DEVICE_O1_ENDPOINTS = json_endpoints(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS) -DEVICE_O1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_O1_ID, DEVICE_O1_ENDPOINT_DEFS) -ENDPOINT_ID_O1_EP1 = DEVICE_O1_ENDPOINT_IDS[0] -ENDPOINT_ID_O1_EP2 = DEVICE_O1_ENDPOINT_IDS[1] -ENDPOINT_ID_O1_EP3 = DEVICE_O1_ENDPOINT_IDS[2] -ENDPOINT_ID_O1_EP4 = DEVICE_O1_ENDPOINT_IDS[3] -DEVICE_O1_CONNECT_RULES = json_device_connect_rules(DEVICE_O1_ADDRESS, DEVICE_O1_PORT, { - 'timeout' : DEVICE_O1_TIMEOUT, -}) if USE_REAL_DEVICES else json_device_emulated_connect_rules(DEVICE_O1_ENDPOINT_DEFS) - - -# ----- Links ---------------------------------------------------------------------------------------------------------- -LINK_R1_O1_UUID = get_link_uuid(DEVICE_R1_ID, ENDPOINT_ID_R1_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP1) -LINK_R1_O1_ID = json_link_id(LINK_R1_O1_UUID) -LINK_R1_O1 = json_link(LINK_R1_O1_UUID, [ENDPOINT_ID_R1_13_0_0, ENDPOINT_ID_O1_EP1]) - -LINK_R2_O1_UUID = get_link_uuid(DEVICE_R2_ID, ENDPOINT_ID_R2_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP2) -LINK_R2_O1_ID = json_link_id(LINK_R2_O1_UUID) -LINK_R2_O1 = json_link(LINK_R2_O1_UUID, [ENDPOINT_ID_R2_13_0_0, ENDPOINT_ID_O1_EP2]) - -LINK_R3_O1_UUID = get_link_uuid(DEVICE_R3_ID, ENDPOINT_ID_R3_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP3) -LINK_R3_O1_ID = json_link_id(LINK_R3_O1_UUID) -LINK_R3_O1 = json_link(LINK_R3_O1_UUID, [ENDPOINT_ID_R3_13_0_0, ENDPOINT_ID_O1_EP3]) - -LINK_R4_O1_UUID = get_link_uuid(DEVICE_R4_ID, ENDPOINT_ID_R4_13_0_0, DEVICE_O1_ID, ENDPOINT_ID_O1_EP4) -LINK_R4_O1_ID = json_link_id(LINK_R4_O1_UUID) -LINK_R4_O1 = json_link(LINK_R4_O1_UUID, [ENDPOINT_ID_R4_13_0_0, ENDPOINT_ID_O1_EP4]) - +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id +from tests.tools.mock_osm.Tools import connection_point, wim_mapping # ----- WIM Service Settings ------------------------------------------------------------------------------------------- -def compose_service_endpoint_id(endpoint_id): - device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] - endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] - return ':'.join([device_uuid, endpoint_uuid]) - -WIM_SEP_R1_ID = compose_service_endpoint_id(ENDPOINT_ID_R1_13_1_2) -WIM_SEP_R1_SITE_ID = '1' -WIM_SEP_R1_BEARER = WIM_SEP_R1_ID -WIM_SRV_R1_VLAN_ID = 400 +WIM_DC1_SITE_ID = '1' +WIM_DC1_DEVICE_ID = json_device_id('R1-EMU') +WIM_DC1_ENDPOINT_ID = 
json_endpoint_id(WIM_DC1_DEVICE_ID, '13/1/2') -WIM_SEP_R3_ID = compose_service_endpoint_id(ENDPOINT_ID_R3_13_1_2) -WIM_SEP_R3_SITE_ID = '2' -WIM_SEP_R3_BEARER = WIM_SEP_R3_ID -WIM_SRV_R3_VLAN_ID = 500 +WIM_DC2_SITE_ID = '2' +WIM_DC2_DEVICE_ID = json_device_id('R3-EMU') +WIM_DC2_ENDPOINT_ID = json_endpoint_id(WIM_DC2_DEVICE_ID, '13/1/2') -WIM_USERNAME = 'admin' -WIM_PASSWORD = 'admin' +WIM_SEP_DC1, WIM_MAP_DC1 = wim_mapping(WIM_DC1_SITE_ID, WIM_DC1_ENDPOINT_ID) +WIM_SEP_DC2, WIM_MAP_DC2 = wim_mapping(WIM_DC2_SITE_ID, WIM_DC2_ENDPOINT_ID) +WIM_MAPPING = [WIM_MAP_DC1, WIM_MAP_DC2] -WIM_MAPPING = [ - {'device-id': DEVICE_R1_UUID, 'service_endpoint_id': WIM_SEP_R1_ID, - 'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R1_BEARER}, 'site-id': WIM_SEP_R1_SITE_ID}}, - {'device-id': DEVICE_R3_UUID, 'service_endpoint_id': WIM_SEP_R3_ID, - 'service_mapping_info': {'bearer': {'bearer-reference': WIM_SEP_R3_BEARER}, 'site-id': WIM_SEP_R3_SITE_ID}}, -] +WIM_SRV_VLAN_ID = 300 WIM_SERVICE_TYPE = 'ELINE' WIM_SERVICE_CONNECTION_POINTS = [ - {'service_endpoint_id': WIM_SEP_R1_ID, - 'service_endpoint_encapsulation_type': 'dot1q', - 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R1_VLAN_ID}}, - {'service_endpoint_id': WIM_SEP_R3_ID, - 'service_endpoint_encapsulation_type': 'dot1q', - 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_R3_VLAN_ID}}, + connection_point(WIM_SEP_DC1, 'dot1q', WIM_SRV_VLAN_ID), + connection_point(WIM_SEP_DC2, 'dot1q', WIM_SRV_VLAN_ID), ] - -# ----- Object Collections --------------------------------------------------------------------------------------------- - -CONTEXTS = [CONTEXT] -TOPOLOGIES = [TOPOLOGY] - -DEVICES = [ - (DEVICE_R1, DEVICE_R1_CONNECT_RULES), - (DEVICE_R2, DEVICE_R2_CONNECT_RULES), - (DEVICE_R3, DEVICE_R3_CONNECT_RULES), - (DEVICE_R4, DEVICE_R4_CONNECT_RULES), - (DEVICE_O1, DEVICE_O1_CONNECT_RULES), -] - -LINKS = [LINK_R1_O1, LINK_R2_O1, LINK_R3_O1, LINK_R4_O1] \ No newline at end of file diff --git a/src/tests/ofc22/tests/test_functional_bootstrap.py b/src/tests/ofc22/tests/test_functional_bootstrap.py index 76c52810bb855a28f772dcc564e97e9f3ff1f92e..71deb9d596b1494e148b140902ca927e5d664dd3 100644 --- a/src/tests/ofc22/tests/test_functional_bootstrap.py +++ b/src/tests/ofc22/tests/test_functional_bootstrap.py @@ -12,27 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy, logging, pytest -from common.Settings import get_setting +import logging, time +from common.proto.context_pb2 import ContextId, Empty from common.proto.monitoring_pb2 import KpiDescriptorList -from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +from common.tests.LoadScenario import load_scenario_from_descriptor +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Link import json_link_id -from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from monitoring.client.MonitoringClient import MonitoringClient -from context.client.EventsCollector import EventsCollector -from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology from device.client.DeviceClient import DeviceClient -from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES -from tests.Fixtures import context_client, device_client, monitoring_client +from monitoring.client.MonitoringClient import MonitoringClient +from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' -def test_scenario_empty(context_client : ContextClient): # pylint: disable=redefined-outer-name +def test_scenario_bootstrap( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: # ----- List entities - Ensure database is empty ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 @@ -44,161 +43,53 @@ def test_scenario_empty(context_client : ContextClient): # pylint: disable=rede assert len(response.links) == 0 -def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client) - #events_collector.start() - - #expected_events = [] - - # ----- Create Contexts and Topologies ----------------------------------------------------------------------------- - for context in CONTEXTS: - context_uuid = context['context_id']['context_uuid']['uuid'] - LOGGER.info('Adding Context {:s}'.format(context_uuid)) - response = context_client.SetContext(Context(**context)) - assert response.context_uuid.uuid == context_uuid - #expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid))) - - for topology in TOPOLOGIES: - context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] - topology_uuid = topology['topology_id']['topology_uuid']['uuid'] - LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) - response = context_client.SetTopology(Topology(**topology)) - assert response.context_id.context_uuid.uuid == context_uuid - assert response.topology_uuid.uuid == topology_uuid - context_id = json_context_id(context_uuid) - #expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id))) + # ----- Load Scenario 
---------------------------------------------------------------------------------------------- + descriptor_loader = load_scenario_from_descriptor( + DESCRIPTOR_FILE, context_client, device_client, None, None) - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) - - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_devices_bootstraping( - context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client, log_events_received=True) - #events_collector.start() - - #expected_events = [] - - # ----- Create Devices and Validate Collected Events --------------------------------------------------------------- - for device, connect_rules in DEVICES: - device_uuid = device['device_id']['device_uuid']['uuid'] - LOGGER.info('Adding Device {:s}'.format(device_uuid)) - - device_with_connect_rules = copy.deepcopy(device) - device_with_connect_rules['device_config']['config_rules'].extend(connect_rules) - response = device_client.AddDevice(Device(**device_with_connect_rules)) - assert response.device_uuid.uuid == device_uuid - - #expected_events.extend([ - # # Device creation, update for automation to start the device - # ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)), - # #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)), - #]) - - #response = context_client.GetDevice(response) - #for endpoint in response.device_endpoints: - # for _ in endpoint.kpi_sample_types: - # # Monitoring configures monitoring for endpoint - # expected_events.append(('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid))) - - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure bevices are created ----------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) - - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) - - 
response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_links_creation(context_client : ContextClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client) - #events_collector.start() - - #expected_events = [] - - # ----- Create Links and Validate Collected Events ----------------------------------------------------------------- - for link in LINKS: - link_uuid = link['link_id']['link_uuid']['uuid'] - LOGGER.info('Adding Link {:s}'.format(link_uuid)) - response = context_client.SetLink(Link(**link)) - assert response.link_uuid.uuid == link_uuid - #expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid))) - - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_links_created(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure links are created ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + assert len(response.links) == descriptor_loader.num_links + for context_uuid, _ in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 -def test_scenario_kpis_created(monitoring_client: MonitoringClient): +def test_scenario_kpis_created( + context_client : ContextClient, # pylint: disable=redefined-outer-name + monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name +) -> None: """ This test validates that KPIs related to the service/device/endpoint were created during the service creation process. 
""" - response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty()) - LOGGER.info("Number of KPIs created: {}".format(len(response.kpi_descriptor_list))) - # TODO: replace the magic number `16` below for a formula that adapts to the number - # of links and devices - assert len(response.kpi_descriptor_list) == 16 + response = context_client.ListDevices(Empty()) + kpis_expected = set() + for device in response.devices: + device_uuid = device.device_id.device_uuid.uuid + for endpoint in device.device_endpoints: + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + for kpi_sample_type in endpoint.kpi_sample_types: + kpis_expected.add((device_uuid, endpoint_uuid, kpi_sample_type)) + num_kpis_expected = len(kpis_expected) + LOGGER.info('Num KPIs expected: {:d}'.format(num_kpis_expected)) + + num_kpis_created, num_retry = 0, 0 + while (num_kpis_created != num_kpis_expected) and (num_retry < 5): + response: KpiDescriptorList = monitoring_client.GetKpiDescriptorList(Empty()) + num_kpis_created = len(response.kpi_descriptor_list) + LOGGER.info('Num KPIs created: {:d}'.format(num_kpis_created)) + time.sleep(0.5) + num_retry += 1 + assert num_kpis_created == num_kpis_expected diff --git a/src/tests/ofc22/tests/test_functional_cleanup.py b/src/tests/ofc22/tests/test_functional_cleanup.py index b0dfe54900f5a806607fcd669942e7fa592dcbaa..be807eaa0242f2363b5b6c189ce4de264528a54c 100644 --- a/src/tests/ofc22/tests/test_functional_cleanup.py +++ b/src/tests/ofc22/tests/test_functional_cleanup.py @@ -12,93 +12,63 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, pytest -from common.Settings import get_setting -from common.tests.EventTools import EVENT_REMOVE, check_events +import logging +from common.tools.descriptor.Loader import DescriptorLoader from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Link import json_link_id -from common.tools.object_factory.Topology import json_topology_id -from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId +from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from tests.Fixtures import context_client, device_client -from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' -def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name + +def test_services_removed( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: # ----- List entities - Ensure service is removed ------------------------------------------------------------------ + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert 
len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_scenario_cleanup( - context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name - - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client) - #events_collector.start() - - #expected_events = [] - - # ----- Delete Links and Validate Collected Events ----------------------------------------------------------------- - for link in LINKS: - link_id = link['link_id'] - link_uuid = link_id['link_uuid']['uuid'] - LOGGER.info('Deleting Link {:s}'.format(link_uuid)) - context_client.RemoveLink(LinkId(**link_id)) - #expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid))) - - # ----- Delete Devices and Validate Collected Events --------------------------------------------------------------- - for device, _ in DEVICES: - device_id = device['device_id'] - device_uuid = device_id['device_uuid']['uuid'] - LOGGER.info('Deleting Device {:s}'.format(device_uuid)) - device_client.DeleteDevice(DeviceId(**device_id)) - #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) - - # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ - for topology in TOPOLOGIES: - topology_id = topology['topology_id'] - context_uuid = topology_id['context_id']['context_uuid']['uuid'] - topology_uuid = topology_id['topology_uuid']['uuid'] - LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) - context_client.RemoveTopology(TopologyId(**topology_id)) - context_id = json_context_id(context_uuid) - #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) - - # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- - for context in CONTEXTS: - context_id = context['context_id'] - context_uuid = context_id['context_uuid']['uuid'] - LOGGER.info('Deleting Context {:s}'.format(context_uuid)) - context_client.RemoveContext(ContextId(**context_id)) - #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) - - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() - - -def test_scenario_empty_again(context_client : ContextClient): # pylint: disable=redefined-outer-name + assert len(response.links) == descriptor_loader.num_links + + for context_uuid, _ in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 + + + # ----- Delete 
Links, Devices, Topologies, Contexts ---------------------------------------------------------------- + for link in descriptor_loader.links: + context_client.RemoveLink(LinkId(**link['link_id'])) + + for device in descriptor_loader.devices: + device_client .DeleteDevice(DeviceId(**device['device_id'])) + + for context_uuid, topology_list in descriptor_loader.topologies.items(): + for topology in topology_list: + context_client.RemoveTopology(TopologyId(**topology['topology_id'])) + + for context in descriptor_loader.contexts: + context_client.RemoveContext(ContextId(**context['context_id'])) + + # ----- List entities - Ensure database is empty again ------------------------------------------------------------- response = context_client.ListContexts(Empty()) assert len(response.contexts) == 0 diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py index 5615f119b91fba10dd767d7188b303f926750e06..e606d060d52631ba72e191d7c025bd7b43048b39 100644 --- a/src/tests/ofc22/tests/test_functional_create_service.py +++ b/src/tests/ofc22/tests/test_functional_create_service.py @@ -12,24 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, pytest, random, time +import logging, random from common.DeviceTypes import DeviceTypeEnum -from common.Settings import get_setting -from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events -from common.tools.object_factory.Connection import json_connection_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Service import json_service_id +from common.proto.context_pb2 import ContextId, Empty +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from common.tools.descriptor.Loader import DescriptorLoader from common.tools.grpc.Tools import grpc_message_to_json_string -from compute.tests.mock_osm.MockOSM import MockOSM +from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from monitoring.client.MonitoringClient import MonitoringClient -from context.client.EventsCollector import EventsCollector -from common.proto.context_pb2 import ContextId, Empty -from tests.Fixtures import context_client, monitoring_client -from .Fixtures import osm_wim -from .Objects import ( - CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, - WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) +from tests.Fixtures import context_client, device_client, monitoring_client # pylint: disable=unused-import +from tests.tools.mock_osm.MockOSM import MockOSM +from .Fixtures import osm_wim # pylint: disable=unused-import +from .Objects import WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -37,89 +32,69 @@ LOGGER.setLevel(logging.DEBUG) DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' + +def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + with open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) -def 
test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure links are created ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) + assert len(response.links) == descriptor_loader.num_links - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 -def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - # TODO: restablish the tests of the events - # events_collector = EventsCollector(context_client, log_events_received=True) - # events_collector.start() - # ----- Create Service --------------------------------------------------------------------------------------------- service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) osm_wim.get_connectivity_service_status(service_uuid) - # ----- Validate collected events ---------------------------------------------------------------------------------- - - # packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) - # optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS) - # optical_service_uuid = '{:s}:optical'.format(service_uuid) - - # expected_events = [ - # # Create packet service and add first endpoint - # ('ServiceEvent', EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - # ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - - # # Configure OLS controller, create optical service, create optical connection - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)), - # ('ServiceEvent', EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), - # ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)), - - # # Configure endpoint packet devices, add second endpoint to service, create connection - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), - # ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - # ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)), - # ] - # check_events(events_collector, expected_events) - - # # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - # events_collector.stop() - - -def 
test_scenario_service_created(context_client : ContextClient): # pylint: disable=redefined-outer-name
+    # ----- List entities - Ensure service is created ------------------------------------------------------------------
     response = context_client.ListContexts(Empty())
-    assert len(response.contexts) == len(CONTEXTS)
+    assert len(response.contexts) == descriptor_loader.num_contexts
 
-    response = context_client.ListTopologies(ContextId(**CONTEXT_ID))
-    assert len(response.topologies) == len(TOPOLOGIES)
+    for context_uuid, num_topologies in descriptor_loader.num_topologies.items():
+        response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid)))
+        assert len(response.topologies) == num_topologies
 
     response = context_client.ListDevices(Empty())
-    assert len(response.devices) == len(DEVICES)
+    assert len(response.devices) == descriptor_loader.num_devices
 
     response = context_client.ListLinks(Empty())
-    assert len(response.links) == len(LINKS)
+    assert len(response.links) == descriptor_loader.num_links
+
+    for context_uuid, num_services in descriptor_loader.num_services.items():
+        response = context_client.ListServices(ContextId(**json_context_id(context_uuid)))
+        LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+        assert len(response.services) == 2*num_services # OLS & L3NM => (L3NM + TAPI)
 
-    response = context_client.ListServices(ContextId(**CONTEXT_ID))
-    LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
-    assert len(response.services) == 2 # L3NM + TAPI
-    for service in response.services:
-        service_id = service.service_id
-        response = context_client.ListConnections(service_id)
-        LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
-            grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response)))
-        assert len(response.connections) == 1 # one connection per service
+        for service in response.services:
+            service_id = service.service_id
+            response = context_client.ListConnections(service_id)
+            LOGGER.info('  ServiceId[{:s}] => Connections[{:d}] = {:s}'.format(
+                grpc_message_to_json_string(service_id), len(response.connections),
+                grpc_message_to_json_string(response)))
+            assert len(response.connections) == 1 # one connection per service
 
 
-def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
+def test_scenario_kpi_values_created(
+    monitoring_client: MonitoringClient, # pylint: disable=redefined-outer-name
+) -> None:
     """
     This test validates that KPI values have been inserted into the monitoring database.
     We sample k KPI descriptors to test.
@@ -128,6 +103,22 @@ def test_scenario_kpi_values_created(monitoring_client: MonitoringClient):
     kpi_descriptors = random.choices(response.kpi_descriptor_list, k=2)
 
     for kpi_descriptor in kpi_descriptors:
+        MSG = 'KPI(kpi_uuid={:s}, device_uuid={:s}, endpoint_uuid={:s}, service_uuid={:s}, kpi_sample_type={:s})...'
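+        # Log the sampled KPI descriptor (device, endpoint, service, sample type)
+        # before querying its instant value; this eases debugging if the checks below fail.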
+ LOGGER.info(MSG.format( + str(kpi_descriptor.kpi_id.kpi_id.uuid), str(kpi_descriptor.device_id.device_uuid.uuid), + str(kpi_descriptor.endpoint_id.endpoint_uuid.uuid), str(kpi_descriptor.service_id.service_uuid.uuid), + str(KpiSampleType.Name(kpi_descriptor.kpi_sample_type)))) response = monitoring_client.GetInstantKpi(kpi_descriptor.kpi_id) - assert response.kpi_id.kpi_id.uuid == kpi_descriptor.kpi_id.kpi_id.uuid - assert response.timestamp.timestamp > 0 + kpi_uuid = response.kpi_id.kpi_id.uuid + assert kpi_uuid == kpi_descriptor.kpi_id.kpi_id.uuid + kpi_value_type = response.kpi_value.WhichOneof('value') + if kpi_value_type is None: + MSG = ' KPI({:s}): No instant value found' + LOGGER.warning(MSG.format(str(kpi_uuid))) + else: + kpi_timestamp = response.timestamp.timestamp + assert kpi_timestamp > 0 + assert kpi_value_type == 'floatVal' + kpi_value = getattr(response.kpi_value, kpi_value_type) + MSG = ' KPI({:s}): timestamp={:s} value_type={:s} value={:s}' + LOGGER.info(MSG.format(str(kpi_uuid), str(kpi_timestamp), str(kpi_value_type), str(kpi_value))) diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py index 5d9568cd81906ac76b600a2253a5e0bdf741bc01..0f8d088012bed164e4603a813bfe9154eda8f568 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -12,23 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, pytest +import logging +from common.Constants import DEFAULT_CONTEXT_UUID from common.DeviceTypes import DeviceTypeEnum -from common.Settings import get_setting -from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events -from common.tools.object_factory.Connection import json_connection_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Service import json_service_id +from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader +from common.tools.object_factory.Context import json_context_id from common.tools.grpc.Tools import grpc_message_to_json_string -from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum -from tests.Fixtures import context_client -from .Fixtures import osm_wim -from .Objects import ( - CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, - WIM_PASSWORD, WIM_USERNAME) +from tests.Fixtures import context_client # pylint: disable=unused-import +from tests.tools.mock_osm.MockOSM import MockOSM +from .Fixtures import osm_wim # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) @@ -37,86 +31,69 @@ LOGGER.setLevel(logging.DEBUG) DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value +DESCRIPTOR_FILE = 'ofc22/descriptors_emulated.json' -def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name + +def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name # ----- List entities - Ensure service is created ------------------------------------------------------------------ + with 
open(DESCRIPTOR_FILE, 'r', encoding='UTF-8') as f: + descriptors = f.read() + + descriptor_loader = DescriptorLoader(descriptors) + response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) + assert len(response.links) == descriptor_loader.num_links - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # L3NM + TAPI + l3nm_service_uuids = set() + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_UUID))) + assert len(response.services) == 2 # OLS & L3NM => (L3NM + TAPI) for service in response.services: service_id = service.service_id + + if service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM: + service_uuid = service_id.service_uuid.uuid + l3nm_service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} + response = context_client.ListConnections(service_id) LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( - grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response))) assert len(response.connections) == 1 # one connection per service + # Identify service to delete + assert len(l3nm_service_uuids) == 1 # assume a single L3NM service has been created + l3nm_service_uuid = set(l3nm_service_uuids).pop() -def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - #events_collector = EventsCollector(context_client, log_events_received=True) - #events_collector.start() # ----- Delete Service --------------------------------------------------------------------------------------------- - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) - assert len(response.services) == 2 # L3NM + TAPI - service_uuids = set() - for service in response.services: - if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue - service_uuid = service.service_id.service_uuid.uuid - service_uuids.add(service_uuid) - osm_wim.conn_info[service_uuid] = {} - - assert len(service_uuids) == 1 # assume a single L3NM service has been created - service_uuid = set(service_uuids).pop() - - osm_wim.delete_connectivity_service(service_uuid) - - # ----- Validate collected events ---------------------------------------------------------------------------------- - #packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) - #optical_connection_uuid = 
'{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS) - #optical_service_uuid = '{:s}:optical'.format(service_uuid) - - #expected_events = [ - # ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), - # ('ServiceEvent', EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)), - # ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)), - # ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)), - # ('ServiceEvent', EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), - #] - #check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - #events_collector.stop() + osm_wim.delete_connectivity_service(l3nm_service_uuid) -def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure service is removed ------------------------------------------------------------------ response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) + assert len(response.contexts) == descriptor_loader.num_contexts - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) + for context_uuid, num_topologies in descriptor_loader.num_topologies.items(): + response = context_client.ListTopologies(ContextId(**json_context_id(context_uuid))) + assert len(response.topologies) == num_topologies response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) + assert len(response.devices) == descriptor_loader.num_devices response = context_client.ListLinks(Empty()) - assert len(response.links) == len(LINKS) + assert len(response.links) == descriptor_loader.num_links - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + for context_uuid, num_services in descriptor_loader.num_services.items(): + response = context_client.ListServices(ContextId(**json_context_id(context_uuid))) + assert len(response.services) == 0 diff --git a/src/tests/p4/__init__.py b/src/tests/p4/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/p4/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/p4/deploy_specs.sh b/src/tests/p4/deploy_specs.sh new file mode 100644 index 0000000000000000000000000000000000000000..b486474e2afad7305409bf410c7b8885b0afe2a8 --- /dev/null +++ b/src/tests/p4/deploy_specs.sh @@ -0,0 +1,17 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. 
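+# (e.g., the MicroK8s built-in registry is typically exposed at localhost:32000,
+# which is what the default value below assumes)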
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+export TFS_COMPONENTS="context device automation service compute monitoring webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/src/tests/p4/mininet/1switch1path.py b/src/tests/p4/mininet/1switch1path.py
new file mode 100755
index 0000000000000000000000000000000000000000..466fb6a06e5b9ba8598614511c95ac4271d609e8
--- /dev/null
+++ b/src/tests/p4/mininet/1switch1path.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+
+# Copyright 2019-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from mininet.cli import CLI
+from mininet.log import setLogLevel
+from mininet.net import Mininet
+from mininet.node import Host
+from mininet.topo import Topo
+from stratum import StratumBmv2Switch
+
+CPU_PORT = 255
+
+class IPv4Host(Host):
+    """Host that can be configured with an IPv4 gateway (default route).
+ """ + + def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None, + **_params): + super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params) + self.cmd('ip -4 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -6 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -4 link set up %s' % self.defaultIntf()) + self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf())) + if gw: + self.cmd('ip -4 route add default via %s' % gw) + # Disable offload + for attr in ["rx", "tx", "sg"]: + cmd = "/sbin/ethtool --offload %s %s off" % ( + self.defaultIntf(), attr) + self.cmd(cmd) + + def updateIP(): + return ip.split('/')[0] + + self.defaultIntf().updateIP = updateIP + +class TutorialTopo(Topo): + """Basic Server-Client topology with IPv4 hosts""" + + def __init__(self, *args, **kwargs): + Topo.__init__(self, *args, **kwargs) + + # Spines + # gRPC port 50001 + switch1 = self.addSwitch('switch1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # IPv4 hosts attached to switch 1 + client = self.addHost('client', cls=IPv4Host, mac="aa:bb:cc:dd:ee:11", + ip='10.0.0.1/24', gw='10.0.0.100') +# client.sendCmd('arp -s 10.0.0.2 aa:bb:cc:dd:ee:22') +# client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22') + server = self.addHost('server', cls=IPv4Host, mac="aa:bb:cc:dd:ee:22", + ip='10.0.0.2/24', gw='10.0.0.100') +# server.sendCmd('arp -s 10.0.0.1 aa:bb:cc:dd:ee:11') +# server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11') + self.addLink(client, switch1) # port 1 + self.addLink(server, switch1) # port 2 + + +def main(): + net = Mininet(topo=TutorialTopo(), controller=None) + net.start() + client = net.hosts[0] + server = net.hosts[1] + client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22') + server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11') + CLI(net) + net.stop() + print '#' * 80 + print 'ATTENTION: Mininet was stopped! Perhaps accidentally?' + print 'No worries, it will restart automatically in a few seconds...' + print 'To access again the Mininet CLI, use `make mn-cli`' + print 'To detach from the CLI (without stopping), press Ctrl-D' + print 'To permanently quit Mininet, use `make stop`' + print '#' * 80 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Mininet topology script for 2x2 fabric with stratum_bmv2 and IPv4 hosts') + args = parser.parse_args() + setLogLevel('info') + + main() diff --git a/src/tests/p4/mininet/2switch1path.py b/src/tests/p4/mininet/2switch1path.py new file mode 100755 index 0000000000000000000000000000000000000000..91db70052a95dea9b53d24dabb25b0feaa9935cf --- /dev/null +++ b/src/tests/p4/mininet/2switch1path.py @@ -0,0 +1,99 @@ +#!/usr/bin/python + +# Copyright 2019-present Open Networking Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +from mininet.cli import CLI +from mininet.log import setLogLevel +from mininet.net import Mininet +from mininet.node import Host +from mininet.topo import Topo +from stratum import StratumBmv2Switch + +CPU_PORT = 255 + +class IPv4Host(Host): + """Host that can be configured with an IPv4 gateway (default route). + """ + + def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None, + **_params): + super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params) + self.cmd('ip -4 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -6 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -4 link set up %s' % self.defaultIntf()) + self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf())) + if gw: + self.cmd('ip -4 route add default via %s' % gw) + # Disable offload + for attr in ["rx", "tx", "sg"]: + cmd = "/sbin/ethtool --offload %s %s off" % ( + self.defaultIntf(), attr) + self.cmd(cmd) + + def updateIP(): + return ip.split('/')[0] + + self.defaultIntf().updateIP = updateIP + +class TutorialTopo(Topo): + """Basic Server-Client topology with IPv4 hosts""" + + def __init__(self, *args, **kwargs): + Topo.__init__(self, *args, **kwargs) + + # Spines + # gRPC port 50001 + switch1 = self.addSwitch('switch1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50002 + switch2 = self.addSwitch('switch2', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # IPv4 hosts attached to switch 1 + client = self.addHost('client', cls=IPv4Host, mac="aa:bb:cc:dd:ee:11", + ip='10.0.0.1/24', gw='10.0.0.100') + server = self.addHost('server', cls=IPv4Host, mac="aa:bb:cc:dd:ee:22", + ip='10.0.0.2/24', gw='10.0.0.100') + self.addLink(client, switch1) # switch1: port 1 + self.addLink(switch1, switch2) # switch1: port 2 == switch2: port 1 + self.addLink(switch2, server) # switch2: port 2 + +def main(): + net = Mininet(topo=TutorialTopo(), controller=None) + net.start() + + client = net.hosts[0] + client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22') + server = net.hosts[1] + server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11') + + CLI(net) + net.stop() + print '#' * 80 + print 'ATTENTION: Mininet was stopped! Perhaps accidentally?' + print 'No worries, it will restart automatically in a few seconds...' + print 'To access again the Mininet CLI, use `make mn-cli`' + print 'To detach from the CLI (without stopping), press Ctrl-D' + print 'To permanently quit Mininet, use `make stop`' + print '#' * 80 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Mininet topology script for 2x2 fabric with stratum_bmv2 and IPv4 hosts') + args = parser.parse_args() + setLogLevel('info') + + main() diff --git a/src/tests/p4/mininet/4switch2path.py b/src/tests/p4/mininet/4switch2path.py new file mode 100755 index 0000000000000000000000000000000000000000..d8ad04b0193a2b9b610a4d5f828891e575d8efe8 --- /dev/null +++ b/src/tests/p4/mininet/4switch2path.py @@ -0,0 +1,110 @@ +#!/usr/bin/python + +# Copyright 2019-present Open Networking Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from mininet.cli import CLI +from mininet.log import setLogLevel +from mininet.net import Mininet +from mininet.node import Host +from mininet.topo import Topo +from stratum import StratumBmv2Switch + +CPU_PORT = 255 + +class IPv4Host(Host): + """Host that can be configured with an IPv4 gateway (default route). + """ + + def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None, + **_params): + super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params) + self.cmd('ip -4 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -6 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -4 link set up %s' % self.defaultIntf()) + self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf())) + if gw: + self.cmd('ip -4 route add default via %s' % gw) + # Disable offload + for attr in ["rx", "tx", "sg"]: + cmd = "/sbin/ethtool --offload %s %s off" % ( + self.defaultIntf(), attr) + self.cmd(cmd) + + def updateIP(): + return ip.split('/')[0] + + self.defaultIntf().updateIP = updateIP + +class TutorialTopo(Topo): + """Basic Server-Client topology with IPv4 hosts""" + + def __init__(self, *args, **kwargs): + Topo.__init__(self, *args, **kwargs) + + # Switches + # gRPC port 50001 + switch1 = self.addSwitch('switch1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50002 + switch2 = self.addSwitch('switch2', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50003 + switch3 = self.addSwitch('switch3', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50004 + switch4 = self.addSwitch('switch4', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # Hosts + client = self.addHost('client', cls=IPv4Host, mac="aa:bb:cc:dd:ee:11", + ip='10.0.0.1/24', gw='10.0.0.100') + server = self.addHost('server', cls=IPv4Host, mac="aa:bb:cc:dd:ee:22", + ip='10.0.0.2/24', gw='10.0.0.100') + + # Switch links + self.addLink(switch1, switch2) # Switch1:port 1, Switch2:port 1 + self.addLink(switch1, switch3) # Switch1:port 2, Switch3:port 1 + self.addLink(switch2, switch4) # Switch2:port 2, Switch4:port 1 + self.addLink(switch3, switch4) # Switch3:port 2, Switch4:port 2 + + # Host links + self.addLink(client, switch1) # Switch 1: port 3 + self.addLink(server, switch4) # Switch 4: port 3 + +def main(): + net = Mininet(topo=TutorialTopo(), controller=None) + net.start() + + client = net.hosts[0] + client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22') + server = net.hosts[1] + server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11') + + CLI(net) + net.stop() + print '#' * 80 + print 'ATTENTION: Mininet was stopped! Perhaps accidentally?' + print 'No worries, it will restart automatically in a few seconds...' 
+ print 'To access again the Mininet CLI, use `make mn-cli`' + print 'To detach from the CLI (without stopping), press Ctrl-D' + print 'To permanently quit Mininet, use `make stop`' + print '#' * 80 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Mininet topology script for 2x2 fabric with stratum_bmv2 and IPv4 hosts') + args = parser.parse_args() + setLogLevel('info') + + main() diff --git a/src/tests/p4/mininet/6switch2path.py b/src/tests/p4/mininet/6switch2path.py new file mode 100755 index 0000000000000000000000000000000000000000..8efb4b017f8c71e55884db8dd5f805820bb65fd6 --- /dev/null +++ b/src/tests/p4/mininet/6switch2path.py @@ -0,0 +1,118 @@ +#!/usr/bin/python + +# Copyright 2019-present Open Networking Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from mininet.cli import CLI +from mininet.log import setLogLevel +from mininet.net import Mininet +from mininet.node import Host +from mininet.topo import Topo +from stratum import StratumBmv2Switch + +CPU_PORT = 255 + +class IPv4Host(Host): + """Host that can be configured with an IPv4 gateway (default route). + """ + + def config(self, mac=None, ip=None, defaultRoute=None, lo='up', gw=None, + **_params): + super(IPv4Host, self).config(mac, ip, defaultRoute, lo, **_params) + self.cmd('ip -4 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -6 addr flush dev %s' % self.defaultIntf()) + self.cmd('ip -4 link set up %s' % self.defaultIntf()) + self.cmd('ip -4 addr add %s dev %s' % (ip, self.defaultIntf())) + if gw: + self.cmd('ip -4 route add default via %s' % gw) + # Disable offload + for attr in ["rx", "tx", "sg"]: + cmd = "/sbin/ethtool --offload %s %s off" % ( + self.defaultIntf(), attr) + self.cmd(cmd) + + def updateIP(): + return ip.split('/')[0] + + self.defaultIntf().updateIP = updateIP + +class TutorialTopo(Topo): + """Basic Server-Client topology with IPv4 hosts""" + + def __init__(self, *args, **kwargs): + Topo.__init__(self, *args, **kwargs) + + # Switches + # gRPC port 50001 + switch1 = self.addSwitch('switch1', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50002 + switch2 = self.addSwitch('switch2', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50003 + switch3 = self.addSwitch('switch3', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50004 + switch4 = self.addSwitch('switch4', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50005 + switch5 = self.addSwitch('switch5', cls=StratumBmv2Switch, cpuport=CPU_PORT) + # gRPC port 50006 + switch6 = self.addSwitch('switch6', cls=StratumBmv2Switch, cpuport=CPU_PORT) + + # Hosts + client = self.addHost('client', cls=IPv4Host, mac="aa:bb:cc:dd:ee:11", + ip='10.0.0.1/24', gw='10.0.0.100') + server = self.addHost('server', cls=IPv4Host, mac="aa:bb:cc:dd:ee:22", + ip='10.0.0.2/24', gw='10.0.0.100') + + # Switch links + self.addLink(switch1, switch2) # Switch1:port 1, Switch2:port 1 + self.addLink(switch1, switch3) # Switch1:port 2, Switch3:port 1 + + self.addLink(switch2, switch4) # 
Switch2:port 2, Switch4:port 1 + self.addLink(switch3, switch5) # Switch3:port 2, Switch5:port 1 + + self.addLink(switch4, switch6) # Switch4:port 2, Switch6:port 1 + self.addLink(switch5, switch6) # Switch5:port 2, Switch6:port 2 + + # Host links + self.addLink(client, switch1) # Switch1: port 3 + self.addLink(server, switch6) # Switch6: port 3 + +def main(): + net = Mininet(topo=TutorialTopo(), controller=None) + net.start() + + client = net.hosts[0] + client.setARP('10.0.0.2', 'aa:bb:cc:dd:ee:22') + server = net.hosts[1] + server.setARP('10.0.0.1', 'aa:bb:cc:dd:ee:11') + + CLI(net) + net.stop() + print '#' * 80 + print 'ATTENTION: Mininet was stopped! Perhaps accidentally?' + print 'No worries, it will restart automatically in a few seconds...' + print 'To access again the Mininet CLI, use `make mn-cli`' + print 'To detach from the CLI (without stopping), press Ctrl-D' + print 'To permanently quit Mininet, use `make stop`' + print '#' * 80 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Mininet topology script for 2x2 fabric with stratum_bmv2 and IPv4 hosts') + args = parser.parse_args() + setLogLevel('info') + + main() diff --git a/src/tests/p4/p4/bmv2.json b/src/tests/p4/p4/bmv2.json new file mode 100644 index 0000000000000000000000000000000000000000..f001eb52e90e875c4152f4d7820664402ac856c3 --- /dev/null +++ b/src/tests/p4/p4/bmv2.json @@ -0,0 +1,381 @@ +{ + "header_types" : [ + { + "name" : "scalars_0", + "id" : 0, + "fields" : [ + ["local_metadata_t.is_multicast", 1, false], + ["_padding_0", 7, false] + ] + }, + { + "name" : "standard_metadata", + "id" : 1, + "fields" : [ + ["ingress_port", 9, false], + ["egress_spec", 9, false], + ["egress_port", 9, false], + ["clone_spec", 32, false], + ["instance_type", 32, false], + ["drop", 1, false], + ["recirculate_port", 16, false], + ["packet_length", 32, false], + ["enq_timestamp", 32, false], + ["enq_qdepth", 19, false], + ["deq_timedelta", 32, false], + ["deq_qdepth", 19, false], + ["ingress_global_timestamp", 48, false], + ["egress_global_timestamp", 48, false], + ["lf_field_list", 32, false], + ["mcast_grp", 16, false], + ["resubmit_flag", 32, false], + ["egress_rid", 16, false], + ["recirculate_flag", 32, false], + ["checksum_error", 1, false], + ["parser_error", 32, false], + ["priority", 3, false], + ["_padding", 2, false] + ] + }, + { + "name" : "ethernet_t", + "id" : 2, + "fields" : [ + ["dst_addr", 48, false], + ["src_addr", 48, false], + ["ether_type", 16, false] + ] + } + ], + "headers" : [ + { + "name" : "scalars", + "id" : 0, + "header_type" : "scalars_0", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "standard_metadata", + "id" : 1, + "header_type" : "standard_metadata", + "metadata" : true, + "pi_omit" : true + }, + { + "name" : "ethernet", + "id" : 2, + "header_type" : "ethernet_t", + "metadata" : false, + "pi_omit" : true + } + ], + "header_stacks" : [], + "header_union_types" : [], + "header_unions" : [], + "header_union_stacks" : [], + "field_lists" : [], + "errors" : [ + ["NoError", 1], + ["PacketTooShort", 2], + ["NoMatch", 3], + ["StackOutOfBounds", 4], + ["HeaderTooShort", 5], + ["ParserTimeout", 6], + ["ParserInvalidArgument", 7] + ], + "enums" : [], + "parsers" : [ + { + "name" : "parser", + "id" : 0, + "init_state" : "start", + "parse_states" : [ + { + "name" : "start", + "id" : 0, + "parser_ops" : [ + { + "parameters" : [ + { + "type" : "regular", + "value" : "ethernet" + } + ], + "op" : "extract" + } + ], + "transitions" : [ + { + "value" : "default", + "mask" : 
null, + "next_state" : null + } + ], + "transition_key" : [] + } + ] + } + ], + "parse_vsets" : [], + "deparsers" : [ + { + "name" : "deparser", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 130, + "column" : 8, + "source_fragment" : "DeparserImpl" + }, + "order" : ["ethernet"] + } + ], + "meter_arrays" : [], + "counter_arrays" : [], + "register_arrays" : [], + "calculations" : [], + "learn_lists" : [], + "actions" : [ + { + "name" : "IngressPipeImpl.drop", + "id" : 0, + "runtime_data" : [], + "primitives" : [ + { + "op" : "mark_to_drop", + "parameters" : [ + { + "type" : "header", + "value" : "standard_metadata" + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 77, + "column" : 8, + "source_fragment" : "mark_to_drop(standard_metadata)" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_egress_port", + "id" : 1, + "runtime_data" : [ + { + "name" : "port", + "bitwidth" : 9 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "egress_spec"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 81, + "column" : 8, + "source_fragment" : "standard_metadata.egress_spec = port" + } + } + ] + }, + { + "name" : "IngressPipeImpl.set_multicast_group", + "id" : 2, + "runtime_data" : [ + { + "name" : "gid", + "bitwidth" : 16 + } + ], + "primitives" : [ + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["standard_metadata", "mcast_grp"] + }, + { + "type" : "runtime_data", + "value" : 0 + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 89, + "column" : 8, + "source_fragment" : "standard_metadata.mcast_grp = gid" + } + }, + { + "op" : "assign", + "parameters" : [ + { + "type" : "field", + "value" : ["scalars", "local_metadata_t.is_multicast"] + }, + { + "type" : "expression", + "value" : { + "type" : "expression", + "value" : { + "op" : "b2d", + "left" : null, + "right" : { + "type" : "bool", + "value" : true + } + } + } + } + ], + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 90, + "column" : 8, + "source_fragment" : "local_metadata.is_multicast = true" + } + } + ] + } + ], + "pipelines" : [ + { + "name" : "ingress", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 71, + "column" : 8, + "source_fragment" : "IngressPipeImpl" + }, + "init_table" : "IngressPipeImpl.l2_exact_table", + "tables" : [ + { + "name" : "IngressPipeImpl.l2_exact_table", + "id" : 0, + "source_info" : { + "filename" : "p4src/main.p4", + "line" : 95, + "column" : 10, + "source_fragment" : "l2_exact_table" + }, + "key" : [ + { + "match_type" : "exact", + "name" : "standard_metadata.ingress_port", + "target" : ["standard_metadata", "ingress_port"], + "mask" : null + } + ], + "match_type" : "exact", + "type" : "simple", + "max_size" : 1024, + "with_counters" : false, + "support_timeout" : false, + "direct_meters" : null, + "action_ids" : [1, 2, 0], + "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"], + "base_default_next" : null, + "next_tables" : { + "IngressPipeImpl.set_egress_port" : null, + "IngressPipeImpl.set_multicast_group" : null, + "IngressPipeImpl.drop" : null + }, + "default_entry" : { + "action_id" : 0, + "action_const" : true, + "action_data" : [], + "action_entry_const" : true + } + } + ], + "action_profiles" : [], + "conditionals" : [] + }, + { + "name" : "egress", + "id" : 1, + 
"source_info" : { + "filename" : "p4src/main.p4", + "line" : 116, + "column" : 8, + "source_fragment" : "EgressPipeImpl" + }, + "init_table" : null, + "tables" : [], + "action_profiles" : [], + "conditionals" : [] + } + ], + "checksums" : [], + "force_arith" : [], + "extern_instances" : [], + "field_aliases" : [ + [ + "queueing_metadata.enq_timestamp", + ["standard_metadata", "enq_timestamp"] + ], + [ + "queueing_metadata.enq_qdepth", + ["standard_metadata", "enq_qdepth"] + ], + [ + "queueing_metadata.deq_timedelta", + ["standard_metadata", "deq_timedelta"] + ], + [ + "queueing_metadata.deq_qdepth", + ["standard_metadata", "deq_qdepth"] + ], + [ + "intrinsic_metadata.ingress_global_timestamp", + ["standard_metadata", "ingress_global_timestamp"] + ], + [ + "intrinsic_metadata.egress_global_timestamp", + ["standard_metadata", "egress_global_timestamp"] + ], + [ + "intrinsic_metadata.lf_field_list", + ["standard_metadata", "lf_field_list"] + ], + [ + "intrinsic_metadata.mcast_grp", + ["standard_metadata", "mcast_grp"] + ], + [ + "intrinsic_metadata.resubmit_flag", + ["standard_metadata", "resubmit_flag"] + ], + [ + "intrinsic_metadata.egress_rid", + ["standard_metadata", "egress_rid"] + ], + [ + "intrinsic_metadata.recirculate_flag", + ["standard_metadata", "recirculate_flag"] + ], + [ + "intrinsic_metadata.priority", + ["standard_metadata", "priority"] + ] + ], + "program" : "p4src/main.p4", + "__meta__" : { + "version" : [2, 18], + "compiler" : "https://github.com/p4lang/p4c" + } +} \ No newline at end of file diff --git a/src/tests/p4/p4/main.p4 b/src/tests/p4/p4/main.p4 new file mode 100644 index 0000000000000000000000000000000000000000..843eb0d580e362e74b25c768b1b01e750138637a --- /dev/null +++ b/src/tests/p4/p4/main.p4 @@ -0,0 +1,144 @@ +/* + * Copyright 2019-present Open Networking Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include <core.p4> +#include <v1model.p4> + +typedef bit<9> port_num_t; +typedef bit<48> mac_addr_t; +typedef bit<16> mcast_group_id_t; + +//------------------------------------------------------------------------------ +// HEADER DEFINITIONS +//------------------------------------------------------------------------------ + +header ethernet_t { + mac_addr_t dst_addr; + mac_addr_t src_addr; + bit<16> ether_type; +} + +struct parsed_headers_t { + ethernet_t ethernet; +} + +struct local_metadata_t { + bool is_multicast; +} + + +//------------------------------------------------------------------------------ +// INGRESS PIPELINE +//------------------------------------------------------------------------------ + +parser ParserImpl (packet_in packet, + out parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) +{ + state start { + transition parse_ethernet; + } + + state parse_ethernet { + packet.extract(hdr.ethernet); + transition accept; + } +} + + +control VerifyChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t meta) +{ + apply { /* EMPTY */ } +} + + +control IngressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + + // Drop action shared by many tables. + action drop() { + mark_to_drop(standard_metadata); + } + + action set_egress_port(port_num_t port) { + standard_metadata.egress_spec = port; + } + + action set_multicast_group(mcast_group_id_t gid) { + // gid will be used by the Packet Replication Engine (PRE) in the + // Traffic Manager--located right after the ingress pipeline, to + // replicate a packet to multiple egress ports, specified by the control + // plane by means of P4Runtime MulticastGroupEntry messages. 
+ standard_metadata.mcast_grp = gid; + local_metadata.is_multicast = true; + } + + // --- l2_exact_table ------------------ + + table l2_exact_table { + key = { + standard_metadata.ingress_port: exact; + } + actions = { + set_egress_port; + set_multicast_group; + @defaultonly drop; + } + const default_action = drop; + } + + apply { + l2_exact_table.apply(); + } +} + +//------------------------------------------------------------------------------ +// EGRESS PIPELINE +//------------------------------------------------------------------------------ + +control EgressPipeImpl (inout parsed_headers_t hdr, + inout local_metadata_t local_metadata, + inout standard_metadata_t standard_metadata) { + apply { /* EMPTY */ } +} + + +control ComputeChecksumImpl(inout parsed_headers_t hdr, + inout local_metadata_t local_metadata) +{ + apply { /* EMPTY */ } +} + + +control DeparserImpl(packet_out packet, in parsed_headers_t hdr) { + apply { + packet.emit(hdr.ethernet); + } +} + + +V1Switch( + ParserImpl(), + VerifyChecksumImpl(), + IngressPipeImpl(), + EgressPipeImpl(), + ComputeChecksumImpl(), + DeparserImpl() +) main; diff --git a/src/tests/p4/p4/p4info.txt b/src/tests/p4/p4/p4info.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b58e740864b72e6ca87582431cd7bd57894d0dd --- /dev/null +++ b/src/tests/p4/p4/p4info.txt @@ -0,0 +1,62 @@ +pkg_info { + arch: "v1model" +} +tables { + preamble { + id: 33605373 + name: "IngressPipeImpl.l2_exact_table" + alias: "l2_exact_table" + } + match_fields { + id: 1 + name: "standard_metadata.ingress_port" + bitwidth: 9 + match_type: EXACT + } + action_refs { + id: 16812802 + } + action_refs { + id: 16841371 + } + action_refs { + id: 16796182 + annotations: "@defaultonly" + scope: DEFAULT_ONLY + } + const_default_action_id: 16796182 + size: 1024 +} +actions { + preamble { + id: 16796182 + name: "IngressPipeImpl.drop" + alias: "drop" + } +} +actions { + preamble { + id: 16812802 + name: "IngressPipeImpl.set_egress_port" + alias: "set_egress_port" + } + params { + id: 1 + name: "port" + bitwidth: 9 + } +} +actions { + preamble { + id: 16841371 + name: "IngressPipeImpl.set_multicast_group" + alias: "set_multicast_group" + } + params { + id: 1 + name: "gid" + bitwidth: 16 + } +} +type_info { +} diff --git a/src/tests/p4/run_test_01_bootstrap.sh b/src/tests/p4/run_test_01_bootstrap.sh new file mode 100755 index 0000000000000000000000000000000000000000..a58fd50a762b99f7c8043931f89e087e8fbda6c3 --- /dev/null +++ b/src/tests/p4/run_test_01_bootstrap.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
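+ +# Stage 1 of the P4 functional test suite: registers the contexts, topologies, P4 devices, and links defined in src/tests/p4/tests/Objects.py against a running TFS instance, then verifies they were created.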
+ +# make sure to source the following scripts: +# - my_deploy.sh +# - tfs_runtime_env_vars.sh + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/p4/tests/test_functional_bootstrap.py + diff --git a/src/tests/p4/run_test_02_create_service.sh b/src/tests/p4/run_test_02_create_service.sh new file mode 100755 index 0000000000000000000000000000000000000000..203c0d5a6dcac35c5355a3d66da0794aa30ad6cc --- /dev/null +++ b/src/tests/p4/run_test_02_create_service.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/p4/tests/test_functional_create_service.py diff --git a/src/tests/p4/run_test_03_delete_service.sh b/src/tests/p4/run_test_03_delete_service.sh new file mode 100755 index 0000000000000000000000000000000000000000..8ac52c6f647b866ada0887f8027d2a92dd230700 --- /dev/null +++ b/src/tests/p4/run_test_03_delete_service.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/p4/tests/test_functional_delete_service.py diff --git a/src/tests/p4/run_test_04_cleanup.sh b/src/tests/p4/run_test_04_cleanup.sh new file mode 100755 index 0000000000000000000000000000000000000000..64cd60f95dbe092c9be125b53a89a6536b6860e0 --- /dev/null +++ b/src/tests/p4/run_test_04_cleanup.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python -m pytest --verbose src/tests/p4/tests/test_functional_cleanup.py diff --git a/src/tests/p4/setup.sh b/src/tests/p4/setup.sh new file mode 100755 index 0000000000000000000000000000000000000000..3ff7e0393d0cd87491bf4ef1db9021351502f5a8 --- /dev/null +++ b/src/tests/p4/setup.sh @@ -0,0 +1,8 @@ +#! 
/bin/bash + +# Select the pod that runs the TFS device component in the tfs namespace +export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}') + +# Create the target folder inside the pod and copy the compiled P4 artifacts into it +kubectl exec ${POD_NAME} -n=tfs -- mkdir /root/p4 + +kubectl cp src/tests/p4/p4/p4info.txt tfs/${POD_NAME}:/root/p4 +kubectl cp src/tests/p4/p4/bmv2.json tfs/${POD_NAME}:/root/p4 diff --git a/src/tests/p4/tests/.gitignore b/src/tests/p4/tests/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..76cb708d1b532c9b69166e55f36bcb912fd5e370 --- /dev/null +++ b/src/tests/p4/tests/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. +Credentials.py diff --git a/src/tests/p4/tests/BuildDescriptors.py b/src/tests/p4/tests/BuildDescriptors.py new file mode 100644 index 0000000000000000000000000000000000000000..5c5419190487eb5089e4a30f523dca43fa3870f2 --- /dev/null +++ b/src/tests/p4/tests/BuildDescriptors.py @@ -0,0 +1,35 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, json, sys +from .Objects import CONTEXTS, DEVICES, LINKS, TOPOLOGIES + +def main(): + with open('tests/p4/descriptors_emulated.json', 'w', encoding='UTF-8') as f: + devices = [] + # DEVICES holds (device, connect_rules, endpoints) tuples; only the first two are needed here + for device, connect_rules, _ in DEVICES: + device = copy.deepcopy(device) + device['device_config']['config_rules'].extend(connect_rules) + devices.append(device) + + f.write(json.dumps({ + 'contexts': CONTEXTS, + 'topologies': TOPOLOGIES, + 'devices': devices, + 'links': LINKS + })) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/ofc22/tests/LoadDescriptors.py b/src/tests/p4/tests/LoadDescriptors.py similarity index 100% rename from src/tests/ofc22/tests/LoadDescriptors.py rename to src/tests/p4/tests/LoadDescriptors.py diff --git a/src/tests/p4/tests/Objects.py b/src/tests/p4/tests/Objects.py new file mode 100644 index 0000000000000000000000000000000000000000..0473207a87ba9ea5c74b45d983db185f8c541cbf --- /dev/null +++ b/src/tests/p4/tests/Objects.py @@ -0,0 +1,345 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
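+ +# These descriptors model the 2x2 fabric used by the P4 functional tests: six BMv2/Stratum switches (SW1..SW6) reachable over gRPC at 10.0.2.10:50001-50006, the links wiring SW1 to SW6 over two disjoint paths (SW1-SW2-SW4-SW6 and SW1-SW3-SW5-SW6), and one P4 service between the host-facing endpoints SW1/3 and SW6/3.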
+ +import os +from typing import Dict, List, Tuple +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import ( + json_device_connect_rules, json_device_emulated_connect_rules, json_device_emulated_packet_router_disabled, + json_device_emulated_tapi_disabled, json_device_id, json_device_p4_disabled, + json_device_packetrouter_disabled, json_device_tapi_disabled) +from common.tools.object_factory.Service import ( + get_service_uuid, json_service_l3nm_planned, json_service_p4_planned) +from common.tools.object_factory.ConfigRule import ( + json_config_rule_set, json_config_rule_delete) +from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_ids, json_endpoints, json_endpoint_id +from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id +from common.tools.object_factory.Topology import json_topology, json_topology_id +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +# ----- Context -------------------------------------------------------------------------------------------------------- +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) +CONTEXT = json_context(DEFAULT_CONTEXT_UUID) + +# ----- Topology ------------------------------------------------------------------------------------------------------- +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) + +# ----- Monitoring Samples --------------------------------------------------------------------------------------------- +PACKET_PORT_SAMPLE_TYPES = [ + KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED, + KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED, + KpiSampleType.KPISAMPLETYPE_BYTES_TRANSMITTED, + KpiSampleType.KPISAMPLETYPE_BYTES_RECEIVED, +] + +# ----- Device Credentials and Settings -------------------------------------------------------------------------------- + + +# ----- Devices -------------------------------------------------------------------------------------------------------- + +CUR_PATH = os.path.dirname(os.path.abspath(__file__)) + +DEVICE_SW1_UUID = 'SW1' +DEVICE_SW1_TIMEOUT = 60 +DEVICE_SW1_ID = json_device_id(DEVICE_SW1_UUID) +DEVICE_SW1 = json_device_p4_disabled(DEVICE_SW1_UUID) + +DEVICE_SW1_DPID = 1 +DEVICE_SW1_NAME = DEVICE_SW1_UUID +DEVICE_SW1_IP_ADDR = '10.0.2.10' +DEVICE_SW1_PORT = '50001' +DEVICE_SW1_VENDOR = 'Open Networking Foundation' +DEVICE_SW1_HW_VER = 'BMv2 simple_switch' +DEVICE_SW1_SW_VER = 'Stratum' + +DEVICE_SW1_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW1_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW1_ENDPOINT_DEFS = [('1', 'port', []), ('2', 'port', []), ('3', 'port', [])] +DEVICE_SW1_ENDPOINTS = json_endpoints(DEVICE_SW1_ID, DEVICE_SW1_ENDPOINT_DEFS) +DEVICE_SW1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW1_ID, DEVICE_SW1_ENDPOINT_DEFS) +ENDPOINT_ID_SW1_1 = DEVICE_SW1_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW1_2 = DEVICE_SW1_ENDPOINTS[1]['endpoint_id'] +ENDPOINT_ID_SW1_3 = DEVICE_SW1_ENDPOINTS[2]['endpoint_id'] + +DEVICE_SW1_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW1_IP_ADDR, + DEVICE_SW1_PORT, + { + 'id': DEVICE_SW1_DPID, + 'name': DEVICE_SW1_NAME, + 'vendor': DEVICE_SW1_VENDOR, + 'hw_ver': DEVICE_SW1_HW_VER, + 'sw_ver': DEVICE_SW1_SW_VER, + 'timeout': DEVICE_SW1_TIMEOUT, + 'p4bin': DEVICE_SW1_BIN_PATH, + 'p4info': DEVICE_SW1_INFO_PATH + } +) + 
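+# NOTE: the SW2..SW6 blocks below repeat the SW1 pattern, differing only in the gRPC port and the number of endpoints. Purely as an illustrative sketch (the explicit definitions below remain the source of truth), a helper like the following could generate each entry: +# +# def p4_switch(uuid, grpc_port, num_endpoints): +#     device_id = json_device_id(uuid) +#     endpoint_defs = [(str(i), 'port', []) for i in range(1, num_endpoints + 1)] +#     connect_rules = json_device_connect_rules('10.0.2.10', grpc_port, { +#         'id': 1, 'name': uuid, 'vendor': 'Open Networking Foundation', +#         'hw_ver': 'BMv2 simple_switch', 'sw_ver': 'Stratum', 'timeout': 60, +#         'p4bin': '/root/p4/bmv2.json', 'p4info': '/root/p4/p4info.txt'}) +#     return (json_device_p4_disabled(uuid), connect_rules, +#             json_endpoints(device_id, endpoint_defs)) +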
+DEVICE_SW2_UUID = 'SW2' +DEVICE_SW2_TIMEOUT = 60 +DEVICE_SW2_ID = json_device_id(DEVICE_SW2_UUID) +DEVICE_SW2 = json_device_p4_disabled(DEVICE_SW2_UUID) + +DEVICE_SW2_DPID = 1 +DEVICE_SW2_NAME = DEVICE_SW2_UUID +DEVICE_SW2_IP_ADDR = '10.0.2.10' +DEVICE_SW2_PORT = '50002' +DEVICE_SW2_VENDOR = 'Open Networking Foundation' +DEVICE_SW2_HW_VER = 'BMv2 simple_switch' +DEVICE_SW2_SW_VER = 'Stratum' + +DEVICE_SW2_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW2_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW2_ENDPOINT_DEFS = [('1', 'port', []), ('2', 'port', [])] +DEVICE_SW2_ENDPOINTS = json_endpoints(DEVICE_SW2_ID, DEVICE_SW2_ENDPOINT_DEFS) +DEVICE_SW2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW2_ID, DEVICE_SW2_ENDPOINT_DEFS) +ENDPOINT_ID_SW2_1 = DEVICE_SW2_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW2_2 = DEVICE_SW2_ENDPOINTS[1]['endpoint_id'] + +DEVICE_SW2_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW2_IP_ADDR, + DEVICE_SW2_PORT, + { + 'id': DEVICE_SW2_DPID, + 'name': DEVICE_SW2_NAME, + 'vendor': DEVICE_SW2_VENDOR, + 'hw_ver': DEVICE_SW2_HW_VER, + 'sw_ver': DEVICE_SW2_SW_VER, + 'timeout': DEVICE_SW2_TIMEOUT, + 'p4bin': DEVICE_SW2_BIN_PATH, + 'p4info': DEVICE_SW2_INFO_PATH + } +) + +DEVICE_SW3_UUID = 'SW3' +DEVICE_SW3_TIMEOUT = 60 +DEVICE_SW3_ID = json_device_id(DEVICE_SW3_UUID) +DEVICE_SW3 = json_device_p4_disabled(DEVICE_SW3_UUID) + +DEVICE_SW3_DPID = 1 +DEVICE_SW3_NAME = DEVICE_SW3_UUID +DEVICE_SW3_IP_ADDR = '10.0.2.10' +DEVICE_SW3_PORT = '50003' +DEVICE_SW3_VENDOR = 'Open Networking Foundation' +DEVICE_SW3_HW_VER = 'BMv2 simple_switch' +DEVICE_SW3_SW_VER = 'Stratum' + +DEVICE_SW3_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW3_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW3_ENDPOINT_DEFS = [('1', 'port', []), ('2', 'port', [])] +DEVICE_SW3_ENDPOINTS = json_endpoints(DEVICE_SW3_ID, DEVICE_SW3_ENDPOINT_DEFS) +DEVICE_SW3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW3_ID, DEVICE_SW3_ENDPOINT_DEFS) +ENDPOINT_ID_SW3_1 = DEVICE_SW3_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW3_2 = DEVICE_SW3_ENDPOINTS[1]['endpoint_id'] + +DEVICE_SW3_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW3_IP_ADDR, + DEVICE_SW3_PORT, + { + 'id': DEVICE_SW3_DPID, + 'name': DEVICE_SW3_NAME, + 'vendor': DEVICE_SW3_VENDOR, + 'hw_ver': DEVICE_SW3_HW_VER, + 'sw_ver': DEVICE_SW3_SW_VER, + 'timeout': DEVICE_SW3_TIMEOUT, + 'p4bin': DEVICE_SW3_BIN_PATH, + 'p4info': DEVICE_SW3_INFO_PATH + } +) + +DEVICE_SW4_UUID = 'SW4' +DEVICE_SW4_TIMEOUT = 60 +DEVICE_SW4_ID = json_device_id(DEVICE_SW4_UUID) +DEVICE_SW4 = json_device_p4_disabled(DEVICE_SW4_UUID) + +DEVICE_SW4_DPID = 1 +DEVICE_SW4_NAME = DEVICE_SW4_UUID +DEVICE_SW4_IP_ADDR = '10.0.2.10' +DEVICE_SW4_PORT = '50004' +DEVICE_SW4_VENDOR = 'Open Networking Foundation' +DEVICE_SW4_HW_VER = 'BMv2 simple_switch' +DEVICE_SW4_SW_VER = 'Stratum' + +DEVICE_SW4_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW4_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW4_ENDPOINT_DEFS = [('1', 'port', []), ('2', 'port', [])] +DEVICE_SW4_ENDPOINTS = json_endpoints(DEVICE_SW4_ID, DEVICE_SW4_ENDPOINT_DEFS) +DEVICE_SW4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW4_ID, DEVICE_SW4_ENDPOINT_DEFS) +ENDPOINT_ID_SW4_1 = DEVICE_SW4_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW4_2 = DEVICE_SW4_ENDPOINTS[1]['endpoint_id'] + +DEVICE_SW4_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW4_IP_ADDR, + DEVICE_SW4_PORT, + { + 'id': DEVICE_SW4_DPID, + 'name': DEVICE_SW4_NAME, + 'vendor': DEVICE_SW4_VENDOR, + 'hw_ver': DEVICE_SW4_HW_VER, + 'sw_ver': DEVICE_SW4_SW_VER, + 'timeout': DEVICE_SW4_TIMEOUT, + 'p4bin': DEVICE_SW4_BIN_PATH, + 
'p4info': DEVICE_SW4_INFO_PATH + } +) + +DEVICE_SW5_UUID = 'SW5' +DEVICE_SW5_TIMEOUT = 60 +DEVICE_SW5_ID = json_device_id(DEVICE_SW5_UUID) +DEVICE_SW5 = json_device_p4_disabled(DEVICE_SW5_UUID) + +DEVICE_SW5_DPID = 1 +DEVICE_SW5_NAME = DEVICE_SW5_UUID +DEVICE_SW5_IP_ADDR = '10.0.2.10' +DEVICE_SW5_PORT = '50005' +DEVICE_SW5_VENDOR = 'Open Networking Foundation' +DEVICE_SW5_HW_VER = 'BMv2 simple_switch' +DEVICE_SW5_SW_VER = 'Stratum' + +DEVICE_SW5_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW5_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW5_ENDPOINT_DEFS = [('1', 'port', []), ('2', 'port', [])] +DEVICE_SW5_ENDPOINTS = json_endpoints(DEVICE_SW5_ID, DEVICE_SW5_ENDPOINT_DEFS) +DEVICE_SW5_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW5_ID, DEVICE_SW5_ENDPOINT_DEFS) +ENDPOINT_ID_SW5_1 = DEVICE_SW5_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW5_2 = DEVICE_SW5_ENDPOINTS[1]['endpoint_id'] + +DEVICE_SW5_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW5_IP_ADDR, + DEVICE_SW5_PORT, + { + 'id': DEVICE_SW5_DPID, + 'name': DEVICE_SW5_NAME, + 'vendor': DEVICE_SW5_VENDOR, + 'hw_ver': DEVICE_SW5_HW_VER, + 'sw_ver': DEVICE_SW5_SW_VER, + 'timeout': DEVICE_SW5_TIMEOUT, + 'p4bin': DEVICE_SW5_BIN_PATH, + 'p4info': DEVICE_SW5_INFO_PATH + } +) + +DEVICE_SW6_UUID = 'SW6' +DEVICE_SW6_TIMEOUT = 60 +DEVICE_SW6_ID = json_device_id(DEVICE_SW6_UUID) +DEVICE_SW6 = json_device_p4_disabled(DEVICE_SW6_UUID) + +DEVICE_SW6_DPID = 1 +DEVICE_SW6_NAME = DEVICE_SW6_UUID +DEVICE_SW6_IP_ADDR = '10.0.2.10' +DEVICE_SW6_PORT = '50006' +DEVICE_SW6_VENDOR = 'Open Networking Foundation' +DEVICE_SW6_HW_VER = 'BMv2 simple_switch' +DEVICE_SW6_SW_VER = 'Stratum' + +DEVICE_SW6_BIN_PATH = '/root/p4/bmv2.json' +DEVICE_SW6_INFO_PATH = '/root/p4/p4info.txt' + +DEVICE_SW6_ENDPOINT_DEFS = [('1', 'port', []), ('2', 'port', []), ('3', 'port', [])] +DEVICE_SW6_ENDPOINTS = json_endpoints(DEVICE_SW6_ID, DEVICE_SW6_ENDPOINT_DEFS) +DEVICE_SW6_ENDPOINT_IDS = json_endpoint_ids(DEVICE_SW6_ID, DEVICE_SW6_ENDPOINT_DEFS) +ENDPOINT_ID_SW6_1 = DEVICE_SW6_ENDPOINTS[0]['endpoint_id'] +ENDPOINT_ID_SW6_2 = DEVICE_SW6_ENDPOINTS[1]['endpoint_id'] +ENDPOINT_ID_SW6_3 = DEVICE_SW6_ENDPOINTS[2]['endpoint_id'] + +DEVICE_SW6_CONNECT_RULES = json_device_connect_rules( + DEVICE_SW6_IP_ADDR, + DEVICE_SW6_PORT, + { + 'id': DEVICE_SW6_DPID, + 'name': DEVICE_SW6_NAME, + 'vendor': DEVICE_SW6_VENDOR, + 'hw_ver': DEVICE_SW6_HW_VER, + 'sw_ver': DEVICE_SW6_SW_VER, + 'timeout': DEVICE_SW6_TIMEOUT, + 'p4bin': DEVICE_SW6_BIN_PATH, + 'p4info': DEVICE_SW6_INFO_PATH + } +) + +# ----- Links ---------------------------------------------------------------------------------------------------------- +LINK_SW1_SW2_UUID = get_link_uuid(ENDPOINT_ID_SW1_1, ENDPOINT_ID_SW2_1) +LINK_SW1_SW2_ID = json_link_id(LINK_SW1_SW2_UUID) +LINK_SW1_SW2 = json_link(LINK_SW1_SW2_UUID, [ENDPOINT_ID_SW1_1, ENDPOINT_ID_SW2_1]) + +LINK_SW1_SW3_UUID = get_link_uuid(ENDPOINT_ID_SW1_2, ENDPOINT_ID_SW3_1) +LINK_SW1_SW3_ID = json_link_id(LINK_SW1_SW3_UUID) +LINK_SW1_SW3 = json_link(LINK_SW1_SW3_UUID, [ENDPOINT_ID_SW1_2, ENDPOINT_ID_SW3_1]) + +LINK_SW2_SW4_UUID = get_link_uuid(ENDPOINT_ID_SW2_2, ENDPOINT_ID_SW4_1) +LINK_SW2_SW4_ID = json_link_id(LINK_SW2_SW4_UUID) +LINK_SW2_SW4 = json_link(LINK_SW2_SW4_UUID, [ENDPOINT_ID_SW2_2, ENDPOINT_ID_SW4_1]) + +LINK_SW3_SW5_UUID = get_link_uuid(ENDPOINT_ID_SW3_2, ENDPOINT_ID_SW5_1) +LINK_SW3_SW5_ID = json_link_id(LINK_SW3_SW5_UUID) +LINK_SW3_SW5 = json_link(LINK_SW3_SW5_UUID, [ENDPOINT_ID_SW3_2, ENDPOINT_ID_SW5_1]) + +LINK_SW4_SW6_UUID = get_link_uuid(ENDPOINT_ID_SW4_2, ENDPOINT_ID_SW6_1) 
+LINK_SW4_SW6_ID = json_link_id(LINK_SW4_SW6_UUID) +LINK_SW4_SW6 = json_link(LINK_SW4_SW6_UUID, [ENDPOINT_ID_SW4_2, ENDPOINT_ID_SW6_1]) + +LINK_SW5_SW6_UUID = get_link_uuid(ENDPOINT_ID_SW5_2, ENDPOINT_ID_SW6_2) +LINK_SW5_SW6_ID = json_link_id(LINK_SW5_SW6_UUID) +LINK_SW5_SW6 = json_link(LINK_SW5_SW6_UUID, [ENDPOINT_ID_SW5_2, ENDPOINT_ID_SW6_2]) + +# ----- Service ---------------------------------------------------------------------------------------------------------- + +#SERVICE_SW1_UUID = get_service_uuid(ENDPOINT_ID_SW1_1, ENDPOINT_ID_SW1_2) +#SERVICE_SW1 = json_service_p4_planned(SERVICE_SW1_UUID) + +#SERVICE_SW2_UUID = get_service_uuid(ENDPOINT_ID_SW2_1, ENDPOINT_ID_SW2_2) +#SERVICE_SW2 = json_service_p4_planned(SERVICE_SW2_UUID) + +SERVICE_SW1_SW6_UUID = get_service_uuid(ENDPOINT_ID_SW1_3, ENDPOINT_ID_SW6_3) +SERVICE_SW1_SW6 = json_service_p4_planned(SERVICE_SW1_SW6_UUID) +SERVICE_SW1_SW6_ENDPOINT_IDS = [DEVICE_SW1_ENDPOINT_IDS[2], DEVICE_SW6_ENDPOINT_IDS[2]] + +# ----- Object Collections --------------------------------------------------------------------------------------------- + +CONTEXTS = [CONTEXT] +TOPOLOGIES = [TOPOLOGY] + +DEVICES = [ + (DEVICE_SW1, DEVICE_SW1_CONNECT_RULES, DEVICE_SW1_ENDPOINTS), + (DEVICE_SW2, DEVICE_SW2_CONNECT_RULES, DEVICE_SW2_ENDPOINTS), + (DEVICE_SW3, DEVICE_SW3_CONNECT_RULES, DEVICE_SW3_ENDPOINTS), + (DEVICE_SW4, DEVICE_SW4_CONNECT_RULES, DEVICE_SW4_ENDPOINTS), + (DEVICE_SW5, DEVICE_SW5_CONNECT_RULES, DEVICE_SW5_ENDPOINTS), + (DEVICE_SW6, DEVICE_SW6_CONNECT_RULES, DEVICE_SW6_ENDPOINTS), +] + +LINKS = [ + LINK_SW1_SW2, + LINK_SW1_SW3, + + LINK_SW2_SW4, + LINK_SW3_SW5, + + LINK_SW4_SW6, + LINK_SW5_SW6 + ] + +#SERVICES = [(SERVICE_SW1, DEVICE_SW1_ENDPOINT_IDS), (SERVICE_SW2, DEVICE_SW2_ENDPOINT_IDS)] + +#SERVICE_SW1_SW2_ENDPOINT_IDS = DEVICE_SW1_ENDPOINT_IDS + DEVICE_SW2_ENDPOINT_IDS + +SERVICES = [(SERVICE_SW1_SW6, SERVICE_SW1_SW6_ENDPOINT_IDS)] \ No newline at end of file diff --git a/src/tests/p4/tests/__init__.py b/src/tests/p4/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/p4/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/p4/tests/test_functional_bootstrap.py b/src/tests/p4/tests/test_functional_bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..793d80c7bf97e9f01a7ba968c8ea1c654d1f4a93 --- /dev/null +++ b/src/tests/p4/tests/test_functional_bootstrap.py @@ -0,0 +1,107 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, logging, pytest +from common.Settings import get_setting +from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import ConfigActionEnum, Context, ContextId, Device, Empty, Link, Topology, DeviceOperationalStatusEnum +from device.client.DeviceClient import DeviceClient +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + +def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name + + # ----- Create Contexts and Topologies ----------------------------------------------------------------------------- + for context in CONTEXTS: + context_uuid = context['context_id']['context_uuid']['uuid'] + LOGGER.info('Adding Context {:s}'.format(context_uuid)) + response = context_client.SetContext(Context(**context)) + assert response.context_uuid.uuid == context_uuid + + for topology in TOPOLOGIES: + context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] + topology_uuid = topology['topology_id']['topology_uuid']['uuid'] + LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) + response = context_client.SetTopology(Topology(**topology)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.topology_uuid.uuid == topology_uuid + context_id = json_context_id(context_uuid) + + +def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + +def test_devices_bootstrapping( + context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + + # ----- Create Devices --------------------------------------------------------------- + for device, connect_rules, endpoints in DEVICES: + device_uuid = device['device_id']['device_uuid']['uuid'] + LOGGER.info('Adding Device {:s}'.format(device_uuid)) + + device_p4_with_connect_rules = copy.deepcopy(device) + device_p4_with_connect_rules['device_config']['config_rules'].extend(connect_rules) + response = device_client.AddDevice(Device(**device_p4_with_connect_rules)) + assert response.device_uuid.uuid == device_uuid + + device_p4_with_endpoints = copy.deepcopy(device) + device_p4_with_endpoints['device_endpoints'].extend(endpoints) + device_client.ConfigureDevice(Device(**device_p4_with_endpoints)) + + for link in LINKS: + link_uuid = link['link_id']['link_uuid']['uuid'] + LOGGER.info('Adding Link {:s}'.format(link_uuid)) + response = context_client.SetLink(Link(**link)) + assert response.link_uuid.uuid == link_uuid + +def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure devices are created ----------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) diff --git a/src/tests/p4/tests/test_functional_cleanup.py b/src/tests/p4/tests/test_functional_cleanup.py new file mode 100644 index 0000000000000000000000000000000000000000..3dab4f84fdabbc7370de9af0f8e9a69754d310f6 --- /dev/null +++ b/src/tests/p4/tests/test_functional_cleanup.py @@ -0,0 +1,83 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
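+ +# Tears the scenario down in reverse order of creation: links first, then devices, then topologies, and finally contexts, verifying along the way that the device list ends up empty.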
+ +import copy, logging, pytest +from common.Settings import get_setting +from common.tests.EventTools import EVENT_REMOVE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, Link, LinkId, TopologyId, DeviceOperationalStatusEnum +from device.client.DeviceClient import DeviceClient +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + +def test_scenario_cleanup( + context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + + for link in LINKS: + link_uuid = link['link_id']['link_uuid']['uuid'] + LOGGER.info('Removing Link {:s}'.format(link_uuid)) + link_id = link['link_id'] + context_client.RemoveLink(LinkId(**link_id)) + + # ----- Delete Devices and Validate Collected Events --------------------------------------------------------------- + for device, _, _ in DEVICES: + + device_id = device['device_id'] + device_uuid = device_id['device_uuid']['uuid'] + LOGGER.info('Deleting Device {:s}'.format(device_uuid)) + device_client.DeleteDevice(DeviceId(**device_id)) + #expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + + # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ + for topology in TOPOLOGIES: + topology_id = topology['topology_id'] + context_uuid = topology_id['context_id']['context_uuid']['uuid'] + topology_uuid = topology_id['topology_uuid']['uuid'] + LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) + context_client.RemoveTopology(TopologyId(**topology_id)) + context_id = json_context_id(context_uuid) + #expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) + + # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- + for context in CONTEXTS: + context_id = context['context_id'] + context_uuid = context_id['context_uuid']['uuid'] + LOGGER.info('Deleting Context {:s}'.format(context_uuid)) + context_client.RemoveContext(ContextId(**context_id)) + #expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) diff --git a/src/tests/p4/tests/test_functional_create_service.py b/src/tests/p4/tests/test_functional_create_service.py new file mode 100644 index 0000000000000000000000000000000000000000..96d16a29990b857f83ec946cbdee9ac3b88de717 --- /dev/null +++ b/src/tests/p4/tests/test_functional_create_service.py @@ -0,0 +1,93 @@ +# Copyright 2021-2023 H2020 TeraFlow 
(https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, logging, pytest +from common.Settings import get_setting +from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Service import json_service_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology, Service, ServiceId +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.p4.tests.Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, SERVICES +from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId,\ + DeviceOperationalStatusEnum + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + +def test_rules_entry( + context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient): # pylint: disable=redefined-outer-name + + + + for device, _, __ in DEVICES: + # Enable device + device_p4_with_operational_status = copy.deepcopy(device) + device_p4_with_operational_status['device_operational_status'] = \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + device_client.ConfigureDevice(Device(**device_p4_with_operational_status)) + + # ----- Create Services --------------------------------------------------------------- + for service, endpoints in SERVICES: + # Insert Service (table entries) + service_uuid = service['service_id']['service_uuid']['uuid'] + print('Creating Service {:s}'.format(service_uuid)) + service_p4 = copy.deepcopy(service) + service_client.CreateService(Service(**service_p4)) + service_p4['service_endpoint_ids'].extend(endpoints) + service_client.UpdateService(Service(**service_p4)) + + + +""" +con_cl = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) +dev_cl = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) +srv_cl = 
ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC')) + +for service, endpoints in SERVICES: + service_uuid = service['service_id']['service_uuid']['uuid'] + print('Creating Service {:s}'.format(service_uuid)) + service_p4 = copy.deepcopy(service) + srv_cl.CreateService(Service(**service_p4)) + #service_data = con_cl.GetService(ServiceId(**json_service_id('svc1'))) + #print('service_data = {:s}'.format(grpc_message_to_json_string(service_data))) + service_p4 = copy.deepcopy(service) + service_p4['service_endpoint_ids'].extend(endpoints) + srv_cl.UpdateService(Service(**service_p4)) +""" \ No newline at end of file diff --git a/src/tests/p4/tests/test_functional_delete_service.py b/src/tests/p4/tests/test_functional_delete_service.py new file mode 100644 index 0000000000000000000000000000000000000000..8630686c84259abd242c7b9c2ee65c45b61eb8d4 --- /dev/null +++ b/src/tests/p4/tests/test_functional_delete_service.py @@ -0,0 +1,69 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, logging, pytest +from common.Settings import get_setting +from common.tests.EventTools import EVENT_REMOVE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Service import json_service_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from common.proto.context_pb2 import ConfigActionEnum, ContextId, Device, DeviceId, Empty, LinkId, TopologyId, Service, ServiceId, DeviceOperationalStatusEnum +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, SERVICES + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient(get_setting('SERVICESERVICE_SERVICE_HOST'), get_setting('SERVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + +def test_rules_delete( + context_client : ContextClient, device_client : DeviceClient, service_client : ServiceClient): # pylint: disable=redefined-outer-name + + # ----- Delete Services --------------------------------------------------------------- + for service, endpoints in SERVICES: + # Delete
Service (table entries) + service_uuid = service['service_id']['service_uuid']['uuid'] + print('Deleting Service {:s}'.format(service_uuid)) + service_p4 = copy.deepcopy(service) + response = service_client.DeleteService(ServiceId(**json_service_id(service_uuid, CONTEXT_ID))) + + # ----- Disable Devices --------------------------------------------------------------- + for device, _, _ in DEVICES: + device_p4_with_operational_status = copy.deepcopy(device) + device_p4_with_operational_status['device_operational_status'] = \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED + device_client.ConfigureDevice(Device(**device_p4_with_operational_status)) diff --git a/src/tests/scenario2/.gitignore b/src/tests/scenario2/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0a3f4400d5c88b1af32c7667d69d2fdc12d5424e --- /dev/null +++ b/src/tests/scenario2/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. +descriptors_real.json diff --git a/src/tests/scenario2/MultiIngressController.txt b/src/tests/scenario2/MultiIngressController.txt new file mode 100644 index 0000000000000000000000000000000000000000..b2d6d322465cb1d776b043e5de4dd474d2f0d9c6 --- /dev/null +++ b/src/tests/scenario2/MultiIngressController.txt @@ -0,0 +1,35 @@ +# Ref: https://kubernetes.github.io/ingress-nginx/user-guide/multiple-ingress/ +# Ref: https://fabianlee.org/2021/07/29/kubernetes-microk8s-with-multiple-metallb-endpoints-and-nginx-ingress-controllers/ + +# Check node limits +kubectl describe nodes + +# Create secondary ingress controllers +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +# Delete secondary ingress controllers +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +source nfvsdn22/deploy_specs_dom1.sh +./deploy.sh + +source nfvsdn22/deploy_specs_dom2.sh +./deploy.sh + +source nfvsdn22/deploy_specs_dom3.sh +./deploy.sh + +source nfvsdn22/deploy_specs_dom4.sh +./deploy.sh + +# Manually deploy ingresses for domains +kubectl --namespace tfs-dom1 apply -f nfvsdn22/tfs-ingress-dom1.yaml +kubectl --namespace tfs-dom2 apply -f nfvsdn22/tfs-ingress-dom2.yaml +kubectl --namespace tfs-dom3 apply -f nfvsdn22/tfs-ingress-dom3.yaml +kubectl --namespace tfs-dom4 apply -f nfvsdn22/tfs-ingress-dom4.yaml diff --git a/src/tests/scenario2/Scenario.md b/src/tests/scenario2/Scenario.md new file mode 100644 index 0000000000000000000000000000000000000000..8dad4691ade669522b5c82a5e4ed07e5d0279492 --- /dev/null +++ b/src/tests/scenario2/Scenario.md @@ -0,0 +1,47 @@ +# Scenario: + +- 4 TFS instances + + - domain D1 (source for e-2-e service) + 5 routers + 1 DC + R1@D1/2 <--> R2@D1/1 + R2@D1/3 <--> R3@D1/2 + R2@D1/5 <--> R5@D1/2 + R3@D1/4 <--> R4@D1/3 + R4@D1/5 <--> R5@D1/4 + R5@D1/1 <--> R1@D1/5 + R1@D1/100 <--> DCGW@D1/eth1 + + - domain D2 (transit for e-2-e service) + 6 routers + R1@D2/2 <--> R2@D2/1 + R1@D2/6 <--> R6@D2/1 + R1@D2/5 <--> R5@D2/1 + R2@D2/3 <--> R3@D2/2 + R2@D2/4 <--> R4@D2/2 + R2@D2/5 <--> R5@D2/2 + R2@D2/6 <--> R6@D2/2 + R3@D2/6 <--> R6@D2/3 + R4@D2/5 <--> R5@D2/4 + + - domain D3 (transit for 
e-2-e service) + 4 routers + R1@D3/2 <--> R2@D3/1 + R2@D3/3 <--> R3@D3/2 + R3@D3/4 <--> R4@D3/3 + R4@D3/1 <--> R1@D3/4 + R2@D3/4 <--> R4@D3/2 + + - domain D4 (end for e-2-e service) + 3 routers + R1@D4/2 <--> R2@D4/1 + R1@D4/3 <--> R3@D4/1 + R2@D4/3 <--> R3@D4/2 + R3@D4/100 <--> DCGW@D4/eth1 + + - interdomain links + R4@D1/10 <--> R1@D2/10 + R5@D1/10 <--> R1@D3/10 + R4@D2/10 <--> R2@D4/10 + R5@D2/10 <--> R2@D3/10 + R3@D3/10 <--> R1@D4/10 diff --git a/src/tests/scenario2/__init__.py b/src/tests/scenario2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/scenario2/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/scenario2/delete_all.sh b/src/tests/scenario2/delete_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..5d3e55831c85a3ef547d8e02a29f507663bfa789 --- /dev/null +++ b/src/tests/scenario2/delete_all.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Delete old namespaces +kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 tfs-bchain + +# Delete secondary ingress controllers +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml diff --git a/src/tests/scenario2/deploy_all.sh b/src/tests/scenario2/deploy_all.sh new file mode 100755 index 0000000000000000000000000000000000000000..582a97ac57f624de93e5865b7dcb190a6797bd5b --- /dev/null +++ b/src/tests/scenario2/deploy_all.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Delete old namespaces +kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 + +# Delete secondary ingress controllers +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +# Delete MockBlockchain +#kubectl delete namespace tfs-bchain + +# Create secondary ingress controllers +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml + +# Create MockBlockchain +#./deploy_mock_blockchain.sh + +# Deploy TFS for Domain 1 +source nfvsdn22/deploy_specs_dom1.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom1.sh + +# Deploy TFS for Domain 2 +source nfvsdn22/deploy_specs_dom2.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom2.sh + +# Deploy TFS for Domain 3 +source nfvsdn22/deploy_specs_dom3.sh +./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom3.sh + +# Deploy TFS for Domain 4 +source nfvsdn22/deploy_specs_dom4.sh 
+./deploy.sh +mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_dom4.sh diff --git a/src/tests/scenario2/deploy_specs_dom1.sh b/src/tests/scenario2/deploy_specs_dom1.sh new file mode 100644 index 0000000000000000000000000000000000000000..06d32e005f36d883c44d195ccfd20ec9b7e9a4b8 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom1.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom1" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom1.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Set the skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="NO" diff --git a/src/tests/scenario2/deploy_specs_dom2.sh b/src/tests/scenario2/deploy_specs_dom2.sh new file mode 100644 index 0000000000000000000000000000000000000000..df1726cd31606ada5d2a33d50550b52c02ccbee4 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom2.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom2" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom2.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Set the skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="YES" diff --git a/src/tests/scenario2/deploy_specs_dom3.sh b/src/tests/scenario2/deploy_specs_dom3.sh new file mode 100644 index 0000000000000000000000000000000000000000..027762e3e70d0d1cd76b8d3303ae17c97ea781c7 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom3.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom3" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom3.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Set the skip-build flag. 
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="YES" diff --git a/src/tests/scenario2/deploy_specs_dom4.sh b/src/tests/scenario2/deploy_specs_dom4.sh new file mode 100644 index 0000000000000000000000000000000000000000..a09e9fa899a0ca9fc941fd09496113a20aebbe59 --- /dev/null +++ b/src/tests/scenario2/deploy_specs_dom4.sh @@ -0,0 +1,21 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice dlt interdomain webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE="tfs-dom4" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="nfvsdn22/tfs-ingress-dom4.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Set the skip-build flag. +# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used. +export TFS_SKIP_BUILD="YES" diff --git a/src/tests/scenario2/descriptors/domain1.json b/src/tests/scenario2/descriptors/domain1.json new file mode 100644 index 0000000000000000000000000000000000000000..043b3955f017631203a437cf853c3617cddf93c8 --- /dev/null +++ b/src/tests/scenario2/descriptors/domain1.json @@ -0,0 +1,148 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}, + "device_ids": [ + {"device_uuid": {"uuid": "DC1"}}, + {"device_uuid": {"uuid": "R1@D1"}}, + {"device_uuid": {"uuid": "R2@D1"}}, + {"device_uuid": {"uuid": "R3@D1"}}, + {"device_uuid": {"uuid": "R4@D1"}}, + {"device_uuid": {"uuid": "R5@D1"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, + {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, + {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}}, + {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, + {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, + {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, + {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "DC1"}}, "device_type": "emu-datacenter", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/border", "uuid": "D1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "int"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R1@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": 
"_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/border", "uuid": "DC1"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/border", "uuid": "D2"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5@D1"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D3"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, 
"endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/domain2.json b/src/tests/scenario2/descriptors/domain2.json new file mode 100644 index 0000000000000000000000000000000000000000..81d397abfd3571b1177a06172188b00eed2f3afc --- /dev/null +++ b/src/tests/scenario2/descriptors/domain2.json @@ -0,0 +1,166 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}, + "device_ids": [ + {"device_uuid": {"uuid": "R1@D2"}}, + {"device_uuid": {"uuid": "R2@D2"}}, + {"device_uuid": {"uuid": "R3@D2"}}, + {"device_uuid": {"uuid": "R4@D2"}}, + {"device_uuid": {"uuid": "R5@D2"}}, + {"device_uuid": {"uuid": "R6@D2"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, + {"link_uuid": {"uuid": "R1@D2/6==R6@D2/1"}}, + {"link_uuid": {"uuid": "R1@D2/5==R5@D2/1"}}, + {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, + {"link_uuid": {"uuid": "R2@D2/4==R4@D2/2"}}, + {"link_uuid": {"uuid": "R2@D2/5==R5@D2/2"}}, + {"link_uuid": {"uuid": "R2@D2/6==R6@D2/2"}}, + {"link_uuid": {"uuid": "R3@D2/6==R6@D2/3"}}, + {"link_uuid": {"uuid": "R4@D2/5==R5@D2/4"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "R1@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/internal", "uuid": "6"}, + {"sample_types": [], "type": "copper/border", "uuid": "D1"} + ]}}} + ]} + 
}, + { + "device_id": {"device_uuid": {"uuid": "R2@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/internal", "uuid": "6"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "6"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "5"}, + {"sample_types": [], "type": "copper/border", "uuid": "D4"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R5@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R6@D2"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": 
"copper/internal", "uuid": "3"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D2/6==R6@D2/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D2/5==R5@D2/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/4==R4@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/5==R5@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D2/6==R6@D2/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3@D2/6==R6@D2/3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "6"}}, + {"device_id": {"device_uuid": {"uuid": "R6@D2"}}, "endpoint_uuid": {"uuid": "3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4@D2/5==R5@D2/4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4@D2"}}, "endpoint_uuid": {"uuid": "5"}}, + {"device_id": {"device_uuid": {"uuid": "R5@D2"}}, "endpoint_uuid": {"uuid": "4"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/domain3.json b/src/tests/scenario2/descriptors/domain3.json new file mode 100644 index 0000000000000000000000000000000000000000..3a8e47d30dcef471b388f46d4ba5df5df4716256 --- /dev/null +++ b/src/tests/scenario2/descriptors/domain3.json @@ -0,0 +1,110 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D3"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D3"}}, + "device_ids": [ + {"device_uuid": {"uuid": "R1@D3"}}, + {"device_uuid": {"uuid": "R2@D3"}}, + {"device_uuid": {"uuid": "R3@D3"}}, + {"device_uuid": {"uuid": "R4@D3"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "R1@D3/2==R2@D3/1"}}, + {"link_uuid": {"uuid": "R2@D3/3==R3@D3/2"}}, + {"link_uuid": {"uuid": "R3@D3/4==R4@D3/3"}}, + {"link_uuid": {"uuid": "R4@D3/1==R1@D3/4"}}, + {"link_uuid": {"uuid": "R2@D3/4==R4@D3/2"}} + ] + } + ], + "devices": [ + { + "device_id": 
{"device_uuid": {"uuid": "R1@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D1"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D2"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "4"}, + {"sample_types": [], "type": "copper/border", "uuid": "D4"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R4@D3"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R1@D3/2==R2@D3/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D3"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D3/3==R3@D3/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D3"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R3@D3/4==R4@D3/3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R3@D3"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": 
{"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R4@D3/1==R1@D3/4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "R1@D3"}}, "endpoint_uuid": {"uuid": "4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D3/4==R4@D3/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D3"}}, "endpoint_uuid": {"uuid": "4"}}, + {"device_id": {"device_uuid": {"uuid": "R4@D3"}}, "endpoint_uuid": {"uuid": "2"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/domain4.json b/src/tests/scenario2/descriptors/domain4.json new file mode 100644 index 0000000000000000000000000000000000000000..d9e2d049ad2417beb96b8f3434ed9e94febb4808 --- /dev/null +++ b/src/tests/scenario2/descriptors/domain4.json @@ -0,0 +1,101 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D4"}} + ], "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D4"}}, + "device_ids": [ + {"device_uuid": {"uuid": "DC2"}}, + {"device_uuid": {"uuid": "R1@D4"}}, + {"device_uuid": {"uuid": "R2@D4"}}, + {"device_uuid": {"uuid": "R3@D4"}} + ], "link_ids": [ + {"link_uuid": {"uuid": "R3@D4/DC2==DC2/D4"}}, + {"link_uuid": {"uuid": "R1@D4/2==R2@D4/1"}}, + {"link_uuid": {"uuid": "R1@D4/3==R3@D4/1"}}, + {"link_uuid": {"uuid": "R2@D4/3==R3@D4/2"}} + ] + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "DC2"}}, "device_type": "emu-datacenter", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/border", "uuid": "D4"}, + {"sample_types": [], "type": "copper/internal", "uuid": "int"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R1@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/border", "uuid": "D3"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R2@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": 
"copper/internal", "uuid": "3"}, + {"sample_types": [], "type": "copper/border", "uuid": "D2"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "R3@D4"}}, "device_type": "emu-packet-router", "device_drivers": [0], + "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"sample_types": [], "type": "copper/internal", "uuid": "1"}, + {"sample_types": [], "type": "copper/internal", "uuid": "2"}, + {"sample_types": [], "type": "copper/border", "uuid": "DC2"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "R3@D4/DC2==DC2/D4"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D4"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "DC2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D4/2==R2@D4/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D4"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "R2@D4"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R1@D4/3==R3@D4/1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R1@D4"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "R2@D4/3==R3@D4/2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "R2@D4"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "R3@D4"}}, "endpoint_uuid": {"uuid": "2"}} + ] + } + ] +} diff --git a/src/tests/scenario2/descriptors/idc-slice.json b/src/tests/scenario2/descriptors/idc-slice.json new file mode 100644 index 0000000000000000000000000000000000000000..634209284c00cc8602db2bf91e6088ca120710df --- /dev/null +++ b/src/tests/scenario2/descriptors/idc-slice.json @@ -0,0 +1,20 @@ +{ + "slices":[ + { + "slice_id":{"context_id":{"context_uuid":{"uuid":"admin"}},"slice_uuid":{"uuid":"idc-slice"}}, + "slice_endpoint_ids":[ + {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}}, + {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}} + ], + "slice_status":{"slice_status":1}, + "slice_service_ids":[], + "slice_subslice_ids":[], + "slice_constraints":[], + "slice_config":{"config_rules":[ + {"action":1,"custom":{"resource_key":"/settings","resource_value":"{}"}}, + {"action":1,"custom":{"resource_key":"/device[DC1]/endpoint[int]/settings","resource_value":"{}"}}, + {"action":1,"custom":{"resource_key":"/device[DC2]/endpoint[int]/settings","resource_value":"{}"}} + ]} + } + ] +} diff --git a/src/tests/scenario2/dump_logs.sh b/src/tests/scenario2/dump_logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..c2298fd8ef735eab102d463391004a818c874b42 --- /dev/null +++ b/src/tests/scenario2/dump_logs.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +rm -rf tmp/exec + +echo "Collecting logs for MockBlockChain..." +mkdir -p tmp/exec/mbc +kubectl --namespace tfs-bchain logs deployments/mock-blockchain server > tmp/exec/mbc/mock-blockchain.log +printf "\n" + +echo "Collecting logs for Domain 1..." 
+mkdir -p tmp/exec/dom1 +kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log +kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log +kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log +kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log +kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log +kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log +kubectl --namespace tfs-dom1 logs deployments/interdomainservice server > tmp/exec/dom1/interdomain.log +kubectl --namespace tfs-dom1 logs deployments/dltservice connector > tmp/exec/dom1/dlt-connector.log +kubectl --namespace tfs-dom1 logs deployments/dltservice gateway > tmp/exec/dom1/dlt-gateway.log +printf "\n" + +echo "Collecting logs for Domain 2..." +mkdir -p tmp/exec/dom2 +kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log +kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log +kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log +kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log +kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log +kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log +kubectl --namespace tfs-dom2 logs deployments/interdomainservice server > tmp/exec/dom2/interdomain.log +kubectl --namespace tfs-dom2 logs deployments/dltservice connector > tmp/exec/dom2/dlt-connector.log +kubectl --namespace tfs-dom2 logs deployments/dltservice gateway > tmp/exec/dom2/dlt-gateway.log +printf "\n" + +echo "Collecting logs for Domain 3..." +mkdir -p tmp/exec/dom3 +kubectl --namespace tfs-dom3 logs deployments/contextservice server > tmp/exec/dom3/context.log +kubectl --namespace tfs-dom3 logs deployments/deviceservice server > tmp/exec/dom3/device.log +kubectl --namespace tfs-dom3 logs deployments/serviceservice server > tmp/exec/dom3/service.log +kubectl --namespace tfs-dom3 logs deployments/pathcompservice frontend > tmp/exec/dom3/pathcomp-frontend.log +kubectl --namespace tfs-dom3 logs deployments/pathcompservice backend > tmp/exec/dom3/pathcomp-backend.log +kubectl --namespace tfs-dom3 logs deployments/sliceservice server > tmp/exec/dom3/slice.log +kubectl --namespace tfs-dom3 logs deployments/interdomainservice server > tmp/exec/dom3/interdomain.log +kubectl --namespace tfs-dom3 logs deployments/dltservice connector > tmp/exec/dom3/dlt-connector.log +kubectl --namespace tfs-dom3 logs deployments/dltservice gateway > tmp/exec/dom3/dlt-gateway.log +printf "\n" + +echo "Collecting logs for Domain 4..." 
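+# As in the previous domains: if a deployment is absent, kubectl prints the error to stderr and the redirected log file is simply created empty.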
+mkdir -p tmp/exec/dom4 +kubectl --namespace tfs-dom4 logs deployments/contextservice server > tmp/exec/dom4/context.log +kubectl --namespace tfs-dom4 logs deployments/deviceservice server > tmp/exec/dom4/device.log +kubectl --namespace tfs-dom4 logs deployments/serviceservice server > tmp/exec/dom4/service.log +kubectl --namespace tfs-dom4 logs deployments/pathcompservice frontend > tmp/exec/dom4/pathcomp-frontend.log +kubectl --namespace tfs-dom4 logs deployments/pathcompservice backend > tmp/exec/dom4/pathcomp-backend.log +kubectl --namespace tfs-dom4 logs deployments/sliceservice server > tmp/exec/dom4/slice.log +kubectl --namespace tfs-dom4 logs deployments/interdomainservice server > tmp/exec/dom4/interdomain.log +kubectl --namespace tfs-dom4 logs deployments/dltservice connector > tmp/exec/dom4/dlt-connector.log +kubectl --namespace tfs-dom4 logs deployments/dltservice gateway > tmp/exec/dom4/dlt-gateway.log +printf "\n" + +echo "Done!"
diff --git a/src/tests/scenario2/fast_redeploy.sh b/src/tests/scenario2/fast_redeploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..c4880a5afb1e5f40f0848437f51d39447c2c0673 --- /dev/null +++ b/src/tests/scenario2/fast_redeploy.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +kubectl delete namespace tfs-dom1 tfs-dom2 tfs-dom3 tfs-dom4 + +echo "Deploying tfs-dom1 ..." +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom1.yaml > ./tmp/logs/deploy-tfs-dom1.log +kubectl create namespace tfs-dom1 >> ./tmp/logs/deploy-tfs-dom1.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom1.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/contextservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/deviceservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/pathcompservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/serviceservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/sliceservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/dltservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f ./tmp/manifests/webuiservice.yaml >> ./tmp/logs/deploy-tfs-dom1.log +kubectl --namespace tfs-dom1 apply -f nfvsdn22/tfs-ingress-dom1.yaml >> ./tmp/logs/deploy-tfs-dom1.log +printf "\n" + +echo "Deploying tfs-dom2 ..."
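+# dom2-dom4 repeat the dom1 sequence: recreate the namespace, reinstall the per-domain ingress controller, and re-apply the manifests under ./tmp/manifests, which are presumably generated by a previous full deploy.sh run.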
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom2.yaml > ./tmp/logs/deploy-tfs-dom2.log +kubectl create namespace tfs-dom2 >> ./tmp/logs/deploy-tfs-dom2.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom2.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/contextservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/deviceservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/pathcompservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/serviceservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/sliceservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/dltservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f ./tmp/manifests/webuiservice.yaml >> ./tmp/logs/deploy-tfs-dom2.log +kubectl --namespace tfs-dom2 apply -f nfvsdn22/tfs-ingress-dom2.yaml >> ./tmp/logs/deploy-tfs-dom2.log +printf "\n" + +echo "Deploying tfs-dom3 ..." +kubectl delete -f nfvsdn22/nginx-ingress-controller-dom3.yaml > ./tmp/logs/deploy-tfs-dom3.log +kubectl create namespace tfs-dom3 >> ./tmp/logs/deploy-tfs-dom3.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom3.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/contextservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/deviceservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/pathcompservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/serviceservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/sliceservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/dltservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f ./tmp/manifests/webuiservice.yaml >> ./tmp/logs/deploy-tfs-dom3.log +kubectl --namespace tfs-dom3 apply -f nfvsdn22/tfs-ingress-dom3.yaml >> ./tmp/logs/deploy-tfs-dom3.log +printf "\n" + +echo "Deploying tfs-dom4 ..."
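+# Each domain runs its own ingress controller on dedicated host ports (e.g. 8004/4434/12544 for dom4, as defined in nfvsdn22/nginx-ingress-controller-dom4.yaml), so the four controllers can coexist on a single node.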
+kubectl delete -f nfvsdn22/nginx-ingress-controller-dom4.yaml > ./tmp/logs/deploy-tfs-dom4.log +kubectl create namespace tfs-dom4 >> ./tmp/logs/deploy-tfs-dom4.log +kubectl apply -f nfvsdn22/nginx-ingress-controller-dom4.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/contextservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/deviceservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/pathcompservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/serviceservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/sliceservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/dltservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/interdomainservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f ./tmp/manifests/webuiservice.yaml >> ./tmp/logs/deploy-tfs-dom4.log +kubectl --namespace tfs-dom4 apply -f nfvsdn22/tfs-ingress-dom4.yaml >> ./tmp/logs/deploy-tfs-dom4.log +printf "\n" + +echo "Waiting tfs-dom1 ..." +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom1 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Waiting tfs-dom2 ..." +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom2 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Waiting tfs-dom3 ..."
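+# kubectl wait blocks until each Deployment reports condition=available, or exits non-zero once the 300s timeout expires.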
+kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom3 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Waiting tfs-dom4 ..." +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/contextservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/deviceservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/pathcompservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/serviceservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/sliceservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/dltservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/interdomainservice +kubectl wait --namespace tfs-dom4 --for='condition=available' --timeout=300s deployment/webuiservice +printf "\n" + +echo "Done!" diff --git a/src/tests/scenario2/nginx-ingress-controller-dom1.yaml b/src/tests/scenario2/nginx-ingress-controller-dom1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1aa1ba48be1bc78e5b0b349dd821e18f80b6953a --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom1.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom1 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom1 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom1 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom1 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom1 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom1 + namespace: ingress + labels: + microk8s-application: nginx-ingress-microk8s-dom1 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom1 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom1 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: 
HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8001 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4431 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12541 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom1 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom1 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom1 + - --election-id=ingress-controller-leader-dom1 + - --controller-class=tfs.etsi.org/controller-class-dom1 + - --ingress-class=tfs-ingress-class-dom1 + - ' ' + - --publish-status-address=127.0.0.1 diff --git a/src/tests/scenario2/nginx-ingress-controller-dom2.yaml b/src/tests/scenario2/nginx-ingress-controller-dom2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2dac1ecd26a5fd1c679b8e92ae28b51797987b71 --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom2.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom2 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom2 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom2 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom2 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom2 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom2 + namespace: ingress + labels: + microk8s-application: nginx-ingress-microk8s-dom2 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom2 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom2 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8002 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4432 + 
protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12542 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom2 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom2 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom2 + - --election-id=ingress-controller-leader-dom2 + - --controller-class=tfs.etsi.org/controller-class-dom2 + - --ingress-class=tfs-ingress-class-dom2 + - ' ' + - --publish-status-address=127.0.0.1 diff --git a/src/tests/scenario2/nginx-ingress-controller-dom3.yaml b/src/tests/scenario2/nginx-ingress-controller-dom3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..06eb6b75342e2b6340f6868404c82504da8e09ec --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom3.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom3 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom3 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom3 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom3 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom3 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom3 + namespace: ingress + labels: + microk8s-application: nginx-ingress-microk8s-dom3 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom3 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom3 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8003 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4433 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12543 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom3 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom3 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom3 + - --election-id=ingress-controller-leader-dom3 + - --controller-class=tfs.etsi.org/controller-class-dom3 + - --ingress-class=tfs-ingress-class-dom3 + - ' ' + - --publish-status-address=127.0.0.1 diff --git 
a/src/tests/scenario2/nginx-ingress-controller-dom4.yaml b/src/tests/scenario2/nginx-ingress-controller-dom4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c5c2e2f7004cd5ec8b5856b185c4c9de937a7d3f --- /dev/null +++ b/src/tests/scenario2/nginx-ingress-controller-dom4.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-microk8s-conf-dom4 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-udp-microk8s-conf-dom4 + namespace: ingress +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-ingress-tcp-microk8s-conf-dom4 + namespace: ingress +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: tfs-ingress-class-dom4 + annotations: + ingressclass.kubernetes.io/is-default-class: "false" +spec: + controller: tfs.etsi.org/controller-class-dom4 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx-ingress-microk8s-controller-dom4 + namespace: ingress + labels: + microk8s-application: nginx-ingress-microk8s-dom4 +spec: + selector: + matchLabels: + name: nginx-ingress-microk8s-dom4 + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: nginx-ingress-microk8s-dom4 + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always + serviceAccountName: nginx-ingress-microk8s-serviceaccount + containers: + - image: k8s.gcr.io/ingress-nginx/controller:v1.2.0 + imagePullPolicy: IfNotPresent + name: nginx-ingress-microk8s + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + securityContext: + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 # www-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: 8004 + protocol: TCP + - name: https + containerPort: 443 + hostPort: 4434 + protocol: TCP + - name: health + containerPort: 10254 + hostPort: 12544 + protocol: TCP + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-microk8s-conf-dom4 + - --tcp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-tcp-microk8s-conf-dom4 + - --udp-services-configmap=$(POD_NAMESPACE)/nginx-ingress-udp-microk8s-conf-dom4 + - --election-id=ingress-controller-leader-dom4 + - --controller-class=tfs.etsi.org/controller-class-dom4 + - --ingress-class=tfs-ingress-class-dom4 + - ' ' + - --publish-status-address=127.0.0.1 diff --git a/src/tests/scenario2/reset.sh b/src/tests/scenario2/reset.sh new file mode 100755 index 0000000000000000000000000000000000000000..2bf2cd05559f632b960a5674ea59e334f5123a53 --- /dev/null +++ b/src/tests/scenario2/reset.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +kubectl --namespace tfs-dom1 scale --replicas=0 \ + deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \ + deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice + +kubectl --namespace tfs-dom1 scale --replicas=1 \ + 
deployment/contextservice deployment/deviceservice deployment/pathcompservice deployment/serviceservice \ + deployment/sliceservice deployment/dltservice deployment/interdomainservice deployment/webuiservice diff --git a/src/tests/scenario2/show_deploy.sh b/src/tests/scenario2/show_deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..081b5d3f9430cc3f68b0c1abdf39f0b05eeefae5 --- /dev/null +++ b/src/tests/scenario2/show_deploy.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +echo "Deployment Resources:" +kubectl --namespace tfs-dom1 get all +printf "\n" + +echo "Deployment Ingress:" +kubectl --namespace tfs-dom1 get ingress +printf "\n" diff --git a/src/tests/scenario2/tfs-ingress-dom1.yaml b/src/tests/scenario2/tfs-ingress-dom1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf2e40352d5acd85fcf9ee446df1a312a40556d6 --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom1.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom1 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom1 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/scenario2/tfs-ingress-dom2.yaml b/src/tests/scenario2/tfs-ingress-dom2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..40d9480d75dfad817bb1ffe2052a9a71dbb7322d --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom2.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom2 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom2 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/scenario2/tfs-ingress-dom3.yaml 
b/src/tests/scenario2/tfs-ingress-dom3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..28668b424aa8bd957e12e53583317f336e3b0640 --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom3.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom3 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom3 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/scenario2/tfs-ingress-dom4.yaml b/src/tests/scenario2/tfs-ingress-dom4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3774c327ca9ff6d46d538c7a2530a744187b957d --- /dev/null +++ b/src/tests/scenario2/tfs-ingress-dom4.yaml @@ -0,0 +1,39 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tfs-ingress-dom4 + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: tfs-ingress-class-dom4 + rules: + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /context(/|$)(.*) + pathType: Prefix + backend: + service: + name: contextservice + port: + number: 8080 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: computeservice + port: + number: 8080 diff --git a/src/tests/tools/__init__.py b/src/tests/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/tools/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/mock_osm/Constants.py b/src/tests/tools/mock_osm/Constants.py new file mode 100644 index 0000000000000000000000000000000000000000..44d74169f0fd68073ca4ed5272f3dc7ef3ebf958 --- /dev/null +++ b/src/tests/tools/mock_osm/Constants.py @@ -0,0 +1,16 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' diff --git a/src/compute/tests/mock_osm/MockOSM.py b/src/tests/tools/mock_osm/MockOSM.py similarity index 100% rename from src/compute/tests/mock_osm/MockOSM.py rename to src/tests/tools/mock_osm/MockOSM.py diff --git a/src/tests/tools/mock_osm/Tools.py b/src/tests/tools/mock_osm/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..25a8b6111443424e8bfd2b35501b96a9a762325f --- /dev/null +++ b/src/tests/tools/mock_osm/Tools.py @@ -0,0 +1,48 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Optional + +def compose_service_endpoint_id(site_id : str, endpoint_id : Dict): + device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] + endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] + return ':'.join([site_id, device_uuid, endpoint_uuid]) + +def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant=[]): + ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] + ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] + service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id) + if pe_device_id is None: + bearer = '{:s}:{:s}'.format(ce_device_uuid, ce_endpoint_uuid) + else: + pe_device_uuid = pe_device_id['device_uuid']['uuid'] + bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid) + mapping = { + 'service_endpoint_id': service_endpoint_id, + 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, + 'service_mapping_info': { + 'site-id': site_id, + 'bearer': {'bearer-reference': bearer}, + } + } + if priority is not None: mapping['service_mapping_info']['priority'] = priority + if len(redundant) > 0: mapping['service_mapping_info']['redundant'] = redundant + return service_endpoint_id, mapping + +def connection_point(service_endpoint_id : str, encapsulation_type : str, vlan_id : int): + return { + 'service_endpoint_id': service_endpoint_id, + 'service_endpoint_encapsulation_type': encapsulation_type, + 'service_endpoint_encapsulation_info': {'vlan': vlan_id} + } diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py similarity index 100% rename from src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py rename to src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py diff --git a/src/tests/tools/mock_osm/__init__.py b/src/tests/tools/mock_osm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70a33251242c51f49140e596b8208a19dd5245f7 --- /dev/null +++ b/src/tests/tools/mock_osm/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/compute/tests/mock_osm/acknowledgements.txt b/src/tests/tools/mock_osm/acknowledgements.txt similarity index 100% rename from src/compute/tests/mock_osm/acknowledgements.txt rename to src/tests/tools/mock_osm/acknowledgements.txt diff --git a/src/compute/tests/mock_osm/sdnconn.py b/src/tests/tools/mock_osm/sdnconn.py similarity index 100% rename from src/compute/tests/mock_osm/sdnconn.py rename to src/tests/tools/mock_osm/sdnconn.py diff --git a/src/webui/service/__init__.py b/src/webui/service/__init__.py index 75e1036420d0bc88a790fb7b65f4f4900abaaadd..d60cca6597ced52db8e320f3ba1beb2b032be65b 100644 --- a/src/webui/service/__init__.py +++ b/src/webui/service/__init__.py @@ -19,10 +19,10 @@ from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient def get_working_context() -> str: - if 'context_uuid' in session: - return session['context_uuid'] - else: - return 'Not selected' + return session['context_uuid'] if 'context_uuid' in session else '---' + +def get_working_topology() -> str: + return session['topology_uuid'] if 'topology_uuid' in session else '---' def liveness(): pass @@ -85,6 +85,7 @@ def create_app(use_config=None, web_app_root=None): app.jinja_env.filters['from_json'] = from_json app.jinja_env.globals.update(get_working_context=get_working_context) + app.jinja_env.globals.update(get_working_topology=get_working_topology) if web_app_root is not None: app.wsgi_app = SetSubAppMiddleware(app.wsgi_app, web_app_root) diff --git a/src/webui/service/__main__.py b/src/webui/service/__main__.py index c194be4bcfe71f3665dba75a109aa5fdf9646a8d..ddbda9c511eac4554c168128b3318b3107d892d7 100644 --- a/src/webui/service/__main__.py +++ b/src/webui/service/__main__.py @@ -12,15 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os, sys, logging +import hashlib, sys, logging from prometheus_client import start_http_server from common.Constants import ServiceNameEnum from common.Settings import ( - ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, get_service_baseurl_http, - get_service_port_http, get_setting, wait_for_environment_variables) + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, + get_service_baseurl_http, get_service_port_http, get_setting, wait_for_environment_variables) from webui.service import create_app from webui.Config import MAX_CONTENT_LENGTH, HOST, SECRET_KEY, DEBUG +def create_unique_session_cookie_name() -> str: + hostname = get_setting('HOSTNAME') + if hostname is None: return 'session' + hasher = hashlib.blake2b(digest_size=8) + hasher.update(hostname.encode('UTF-8')) + return 'session:{:s}'.format(str(hasher.hexdigest())) + def main(): log_level = get_log_level() logging.basicConfig(level=log_level) @@ -33,6 +40,8 @@ def main(): get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.SERVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.SLICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) logger.info('Starting...') @@ -49,6 +58,7 @@ def main(): app = create_app(use_config={ 'SECRET_KEY': SECRET_KEY, 'MAX_CONTENT_LENGTH': MAX_CONTENT_LENGTH, + 'SESSION_COOKIE_NAME': create_unique_session_cookie_name(), }, web_app_root=web_app_root) app.run(host=host, port=service_port, debug=debug) diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index f1423e92ed63fa778448978167c1c8e646414885..b57c5735d4b26c541d60a885512fe37a2fd626bc 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -16,7 +16,9 @@ from flask import current_app, render_template, Blueprint, flash, session, redir from common.proto.context_pb2 import ( ConfigActionEnum, ConfigRule, Device, DeviceDriverEnum, DeviceId, DeviceList, DeviceOperationalStatusEnum, - Empty) + Empty, TopologyId) +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from webui.service.device.forms import AddDeviceForm @@ -27,16 +29,28 @@ device_client = DeviceClient() @device.get('/') def home(): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) + + context_uuid = session['context_uuid'] + topology_uuid = session['topology_uuid'] + context_client.connect() - response: DeviceList = context_client.ListDevices(Empty()) + json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) + grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) + topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} + grpc_devices: DeviceList = context_client.ListDevices(Empty()) context_client.close() - return render_template('device/home.html', devices=response.devices, - dde=DeviceDriverEnum, - dose=DeviceOperationalStatusEnum) + + devices = [ + device for device in 
grpc_devices.devices + if device.device_id.device_uuid.uuid in topo_device_uuids + ] + + return render_template( + 'device/home.html', devices=devices, dde=DeviceDriverEnum, + dose=DeviceOperationalStatusEnum) @device.route('add', methods=['GET', 'POST']) def add(): diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 51e903d9ec28c5aaac20cd49e2f97dd7044e12bf..5b8831b7732443830a6f9b1ef8f7da92b4c41cc0 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -14,7 +14,9 @@ from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for -from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId +from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId, TopologyId +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient @@ -23,18 +25,28 @@ context_client = ContextClient() @link.get('/') def home(): - context_uuid = session.get('context_uuid', '-') - if context_uuid == "-": + if 'context_topology_uuid' not in session: flash("Please select a context!", "warning") return redirect(url_for("main.home")) - request = Empty() + + context_uuid = session['context_uuid'] + topology_uuid = session['topology_uuid'] + context_client.connect() - response = context_client.ListLinks(request) + json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) + grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) + topo_link_uuids = {link_id.link_uuid.uuid for link_id in grpc_topology.link_ids} + grpc_links: LinkList = context_client.ListLinks(Empty()) context_client.close() + + links = [ + link for link in grpc_links.links + if link.link_id.link_uuid.uuid in topo_link_uuids + ] + return render_template( - "link/home.html", - links=response.links, - ) + 'link/home.html', links=links) + @link.route('detail/<path:link_uuid>', methods=('GET', 'POST')) def detail(link_uuid: str): diff --git a/src/webui/service/main/forms.py b/src/webui/service/main/forms.py index abef11e06d6222c6bbab527f3a41ccdc5918480f..b138592fccd3f65831673912d04aba79f2dd3c72 100644 --- a/src/webui/service/main/forms.py +++ b/src/webui/service/main/forms.py @@ -19,20 +19,21 @@ from wtforms import SelectField, FileField, SubmitField from wtforms.validators import DataRequired, Length -class ContextForm(FlaskForm): - context = SelectField( 'Context', - choices=[], - validators=[ - DataRequired(), - Length(min=1) - ]) - +class ContextTopologyForm(FlaskForm): + context_topology = SelectField( + 'Ctx/Topo', + choices=[], + validators=[ + DataRequired(), + Length(min=1) + ]) submit = SubmitField('Submit') class DescriptorForm(FlaskForm): - descriptors = FileField('Descriptors', - validators=[ - FileAllowed(['json'], 'JSON Descriptors only!') - ]) + descriptors = FileField( + 'Descriptors', + validators=[ + FileAllowed(['json'], 'JSON Descriptors only!') + ]) submit = SubmitField('Submit') diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 9b1b088579c5b01218316bf1c96b5208ff854609..0e008734730867bca741d748c49e3b0589b40e48 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json, logging +import json, logging, re from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request -from common.proto.context_pb2 import Connection, Context, Device, Empty, Link, Service, Slice, Topology, ContextIdList +from common.proto.context_pb2 import Empty, ContextIdList, TopologyId, TopologyIdList +from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient -from webui.service.main.DescriptorTools import ( - format_custom_config_rules, get_descriptors_add_contexts, get_descriptors_add_services, get_descriptors_add_slices, - get_descriptors_add_topologies, split_devices_by_rules) -from webui.service.main.forms import ContextForm, DescriptorForm +from webui.service.main.forms import ContextTopologyForm, DescriptorForm main = Blueprint('main', __name__) @@ -34,38 +34,6 @@ slice_client = SliceClient() logger = logging.getLogger(__name__) -ENTITY_TO_TEXT = { - # name => singular, plural - 'context' : ('Context', 'Contexts' ), - 'topology' : ('Topology', 'Topologies' ), - 'device' : ('Device', 'Devices' ), - 'link' : ('Link', 'Links' ), - 'service' : ('Service', 'Services' ), - 'slice' : ('Slice', 'Slices' ), - 'connection': ('Connection', 'Connections'), -} - -ACTION_TO_TEXT = { - # action => infinitive, past - 'add' : ('Add', 'Added'), - 'update' : ('Update', 'Updated'), - 'config' : ('Configure', 'Configured'), -} - -def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities): - entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name] - action_infinitive, action_past = ACTION_TO_TEXT[action_name] - num_ok, num_err = 0, 0 - for entity in entities: - try: - grpc_method(grpc_class(**entity)) - num_ok += 1 - except Exception as e: # pylint: disable=broad-except - flash(f'Unable to {action_infinitive} {entity_name_singluar} {str(entity)}: {str(e)}', 'error') - num_err += 1 - if num_ok : flash(f'{str(num_ok)} {entity_name_plural} {action_past}', 'success') - if num_err: flash(f'{str(num_err)} {entity_name_plural} failed', 'danger') - def process_descriptors(descriptors): try: descriptors_file = request.files[descriptors.name] @@ -75,128 +43,89 @@ def process_descriptors(descriptors): flash(f'Unable to load descriptor file: {str(e)}', 'danger') return - dummy_mode = descriptors.get('dummy_mode' , False) - contexts = descriptors.get('contexts' , []) - topologies = descriptors.get('topologies' , []) - devices = descriptors.get('devices' , []) - links = descriptors.get('links' , []) - services = descriptors.get('services' , []) - slices = descriptors.get('slices' , []) - connections = descriptors.get('connections', []) - - # Format CustomConfigRules in Devices, Services and Slices provided in JSON format - for device in devices: - config_rules = device.get('device_config', {}).get('config_rules', []) - config_rules = format_custom_config_rules(config_rules) - device['device_config']['config_rules'] = config_rules - - for service in services: - config_rules = service.get('service_config', {}).get('config_rules', []) - config_rules = format_custom_config_rules(config_rules) - 
service['service_config']['config_rules'] = config_rules - - for slice in slices: - config_rules = slice.get('slice_config', {}).get('config_rules', []) - config_rules = format_custom_config_rules(config_rules) - slice['slice_config']['config_rules'] = config_rules - - - # Context and Topology require to create the entity first, and add devices, links, services, slices, etc. in a - # second stage. - contexts_add = get_descriptors_add_contexts(contexts) - topologies_add = get_descriptors_add_topologies(topologies) - - if dummy_mode: - # Dummy Mode: used to pre-load databases (WebUI debugging purposes) with no smart or automated tasks. - context_client.connect() - process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add ) - process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add) - process_descriptor('device', 'add', context_client.SetDevice, Device, devices ) - process_descriptor('link', 'add', context_client.SetLink, Link, links ) - process_descriptor('service', 'add', context_client.SetService, Service, services ) - process_descriptor('slice', 'add', context_client.SetSlice, Slice, slices ) - process_descriptor('connection', 'add', context_client.SetConnection, Connection, connections ) - process_descriptor('context', 'update', context_client.SetContext, Context, contexts ) - process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies ) - context_client.close() - else: - # Normal mode: follows the automated workflows in the different components - assert len(connections) == 0, 'in normal mode, connections should not be set' - - # Device, Service and Slice require to first create the entity and the configure it - devices_add, devices_config = split_devices_by_rules(devices) - services_add = get_descriptors_add_services(services) - slices_add = get_descriptors_add_slices(slices) - - context_client.connect() - device_client.connect() - service_client.connect() - slice_client.connect() - - process_descriptor('context', 'add', context_client.SetContext, Context, contexts_add ) - process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies_add) - process_descriptor('device', 'add', device_client .AddDevice, Device, devices_add ) - process_descriptor('device', 'config', device_client .ConfigureDevice, Device, devices_config) - process_descriptor('link', 'add', context_client.SetLink, Link, links ) - process_descriptor('service', 'add', service_client.CreateService, Service, services_add ) - process_descriptor('service', 'update', service_client.UpdateService, Service, services ) - process_descriptor('slice', 'add', slice_client .CreateSlice, Slice, slices_add ) - process_descriptor('slice', 'update', slice_client .UpdateSlice, Slice, slices ) - process_descriptor('context', 'update', context_client.SetContext, Context, contexts ) - process_descriptor('topology', 'update', context_client.SetTopology, Topology, topologies ) - - slice_client.close() - service_client.close() - device_client.close() - context_client.close() + descriptor_loader = DescriptorLoader(descriptors) + results = descriptor_loader.process() + for message,level in compose_notifications(results): + flash(message, level) @main.route('/', methods=['GET', 'POST']) def home(): context_client.connect() device_client.connect() - response: ContextIdList = context_client.ListContextIds(Empty()) - context_form: ContextForm = ContextForm() - context_form.context.choices.append(('', 'Select...')) - - for context in 
response.context_ids: - context_form.context.choices.append((context.context_uuid.uuid, context.context_uuid)) - - if context_form.validate_on_submit(): - session['context_uuid'] = context_form.context.data - flash(f'The context was successfully set to `{context_form.context.data}`.', 'success') - return redirect(url_for("main.home")) - - if 'context_uuid' in session: - context_form.context.data = session['context_uuid'] + context_topology_form: ContextTopologyForm = ContextTopologyForm() + context_topology_form.context_topology.choices.append(('', 'Select...')) + + ctx_response: ContextIdList = context_client.ListContextIds(Empty()) + for context_id in ctx_response.context_ids: + context_uuid = context_id.context_uuid.uuid + topo_response: TopologyIdList = context_client.ListTopologyIds(context_id) + for topology_id in topo_response.topology_ids: + topology_uuid = topology_id.topology_uuid.uuid + context_topology_uuid = 'ctx[{:s}]/topo[{:s}]'.format(context_uuid, topology_uuid) + context_topology_name = 'Context({:s}):Topology({:s})'.format(context_uuid, topology_uuid) + context_topology_entry = (context_topology_uuid, context_topology_name) + context_topology_form.context_topology.choices.append(context_topology_entry) + + if context_topology_form.validate_on_submit(): + context_topology_uuid = context_topology_form.context_topology.data + if len(context_topology_uuid) > 0: + match = re.match(r'ctx\[([^\]]+)\]/topo\[([^\]]+)\]', context_topology_uuid) + if match is not None: + session['context_topology_uuid'] = context_topology_uuid = match.group(0) + session['context_uuid'] = context_uuid = match.group(1) + session['topology_uuid'] = topology_uuid = match.group(2) + MSG = f'Context({context_uuid})/Topology({topology_uuid}) successfully selected.'
+ flash(MSG, 'success') + return redirect(url_for("main.home")) + + if 'context_topology_uuid' in session: + context_topology_form.context_topology.data = session['context_topology_uuid'] descriptor_form: DescriptorForm = DescriptorForm() try: if descriptor_form.validate_on_submit(): process_descriptors(descriptor_form.descriptors) return redirect(url_for("main.home")) - except Exception as e: + except Exception as e: # pylint: disable=broad-except logger.exception('Descriptor load failed') flash(f'Descriptor load failed: `{str(e)}`', 'danger') finally: context_client.close() device_client.close() - return render_template('main/home.html', context_form=context_form, descriptor_form=descriptor_form) + return render_template( + 'main/home.html', context_topology_form=context_topology_form, descriptor_form=descriptor_form) @main.route('/topology', methods=['GET']) def topology(): context_client.connect() try: + if 'context_topology_uuid' not in session: + return jsonify({'devices': [], 'links': []}) + + context_uuid = session['context_uuid'] + topology_uuid = session['topology_uuid'] + + json_topo_id = json_topology_id(topology_uuid, context_id=json_context_id(context_uuid)) + grpc_topology = context_client.GetTopology(TopologyId(**json_topo_id)) + + topo_device_uuids = {device_id.device_uuid.uuid for device_id in grpc_topology.device_ids} + topo_link_uuids = {link_id .link_uuid .uuid for link_id in grpc_topology.link_ids } + response = context_client.ListDevices(Empty()) - devices = [{ - 'id': device.device_id.device_uuid.uuid, - 'name': device.device_id.device_uuid.uuid, - 'type': device.device_type, - } for device in response.devices] + devices = [] + for device in response.devices: + if device.device_id.device_uuid.uuid not in topo_device_uuids: continue + devices.append({ + 'id': device.device_id.device_uuid.uuid, + 'name': device.device_id.device_uuid.uuid, + 'type': device.device_type, + }) response = context_client.ListLinks(Empty()) links = [] for link in response.links: + if link.link_id.link_uuid.uuid not in topo_link_uuids: continue if len(link.link_endpoint_ids) != 2: str_link = grpc_message_to_json_string(link) logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link)) diff --git a/src/webui/service/static/topology_icons/Acknowledgements.txt b/src/webui/service/static/topology_icons/Acknowledgements.txt index df5d16dc71d306893818ddfc7f7232fd203c7bcb..b285d225957b0a4e8c14ac4ae5e078597d2a1b27 100644 --- a/src/webui/service/static/topology_icons/Acknowledgements.txt +++ b/src/webui/service/static/topology_icons/Acknowledgements.txt @@ -1,6 +1,7 @@ Network Topology Icons taken from https://vecta.io/symbols -https://symbols.getvecta.com/stencil_240/51_cloud.4d0a827676.png => cloud.png +https://symbols.getvecta.com/stencil_240/51_cloud.4d0a827676.png => network.png + #modified to be grey instead of white https://symbols.getvecta.com/stencil_240/15_atm-switch.1bbf9a7cca.png => packet-switch.png https://symbols.getvecta.com/stencil_241/45_atm-switch.6a7362c1df.png => emu-packet-switch.png diff --git a/src/webui/service/static/topology_icons/cloud.png b/src/webui/service/static/topology_icons/cloud.png deleted file mode 100644 index 0f8e9c9714edd1c11904367ef1e9c60ef7ed3295..0000000000000000000000000000000000000000 Binary files a/src/webui/service/static/topology_icons/cloud.png and /dev/null differ diff --git a/src/webui/service/static/topology_icons/network.png b/src/webui/service/static/topology_icons/network.png new file mode 100644 index 
0000000000000000000000000000000000000000..1f770f7bb2a31834a191e6c8727f059e1f14bbe1 Binary files /dev/null and b/src/webui/service/static/topology_icons/network.png differ diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 5d7801d11880e89869120985307c6b43416f5a05..bee98ee82da3482caf1fad930d03d30572ba287d 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -103,7 +103,7 @@ </li> </ul> <span class="navbar-text" style="color: #fff;"> - Current context: <b>{{ get_working_context() }}</b> + Current Context(<b>{{ get_working_context() }}</b>)/Topology(<b>{{ get_working_topology() }}</b>) </span> </div> </div> diff --git a/src/webui/service/templates/main/home.html b/src/webui/service/templates/main/home.html index db390939ff926b5bbfbfc6507b0f4e79695f3693..43b066cc0227801672fc25780f27e3a699338632 100644 --- a/src/webui/service/templates/main/home.html +++ b/src/webui/service/templates/main/home.html @@ -19,7 +19,7 @@ {% block content %} <h2>ETSI TeraFlowSDN Controller</h2> - {% for field, message in context_form.errors.items() %} + {% for field, message in context_topology_form.errors.items() %} <div class="alert alert-dismissible fade show" role="alert"> <b>{{ field }}</b>: {{ message }} <button type="button" class="btn-close" data-bs-dismiss="alert" aria-label="Close"></button> @@ -28,32 +28,32 @@ {% endfor %} <form id="select_context" method="POST" enctype="multipart/form-data"> - {{ context_form.hidden_tag() }} + {{ context_topology_form.hidden_tag() }} <fieldset class="form-group"> - <legend>Select the working context, or upload a JSON descriptors file</legend> + <legend>Select the desired Context/Topology</legend> <div class="row mb-3"> - {{ context_form.context.label(class="col-sm-1 col-form-label") }} + {{ context_topology_form.context_topology.label(class="col-sm-1 col-form-label") }} <div class="col-sm-5"> - {% if context_form.context.errors %} - {{ context_form.context(class="form-select is-invalid") }} + {% if context_topology_form.context_topology.errors %} + {{ context_topology_form.context_topology(class="form-select is-invalid") }} <div class="invalid-feedback"> - {% for error in context_form.context.errors %} + {% for error in context_topology_form.context_topology.errors %} <span>{{ error }}</span> {% endfor %} </div> {% else %} - {{ context_form.context(class="form-select") }} + {{ context_topology_form.context_topology(class="form-select") }} {% endif %} </div> <div class="col-sm-2"> - {{ context_form.submit(class='btn btn-primary') }} + {{ context_topology_form.submit(class='btn btn-primary') }} </div> </div> </fieldset> </form> - <form id="select_context" method="POST" enctype="multipart/form-data"> - {{ context_form.hidden_tag() }} + <form id="upload_descriptors" method="POST" enctype="multipart/form-data"> + {{ descriptor_form.hidden_tag() }} <fieldset class="form-group"> <legend>Upload a JSON descriptors file</legend> <div class="row mb-3"> diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index 975369ca21d294900c83537916bf527dce4810e9..e1f963e425e23216281068b82da23c809a677296 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -43,6 +43,7 @@ <div class="row mb-3"> <div class="col-sm-4"> + <b>Context: </b> {{ service.service_id.context_id.context_uuid.uuid }}<br><br> <b>UUID: </b> {{ service.service_id.service_uuid.uuid }}<br><br> <b>Type: </b> {{ 
ste.Name(service.service_type).replace('SERVICETYPE_', '') }}<br><br> <b>Status: </b> {{ sse.Name(service.service_status.service_status).replace('SERVICESTATUS_', '') }}<br><br> @@ -209,13 +210,17 @@ <ul> {% for sub_service_id in connection.sub_service_ids %} <li> + {% if sub_service_id.context_id.context_uuid.uuid == session['context_uuid'] %} <a href="{{ url_for('service.detail', service_uuid=sub_service_id.service_uuid.uuid) }}"> - {{ sub_service_id.service_uuid.uuid }} + {{ sub_service_id.context_id.context_uuid.uuid }} / {{ sub_service_id.service_uuid.uuid }} <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16"> <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/> <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/> </svg> </a> + {% else %} + {{ sub_service_id.context_id.context_uuid.uuid }} / {{ sub_service_id.service_uuid.uuid }} + {% endif %} </li> {% endfor %} </ul> diff --git a/src/webui/service/templates/slice/detail.html b/src/webui/service/templates/slice/detail.html index 07734f32304b60365f76413d4689a37b66cc60a3..889e10ce53b4a019b55f714c2442f32f0c2b8e93 100644 --- a/src/webui/service/templates/slice/detail.html +++ b/src/webui/service/templates/slice/detail.html @@ -44,7 +44,9 @@ <div class="row mb-3"> <div class="col-sm-4"> + <b>Context: </b> {{ slice.slice_id.context_id.context_uuid.uuid }}<br><br> <b>UUID: </b> {{ slice.slice_id.slice_uuid.uuid }}<br><br> + <b>Owner: </b> {{ slice.slice_owner.owner_uuid.uuid }}<br><br> <b>Status: </b> {{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}<br><br> </div> <div class="col-sm-8"> @@ -180,13 +182,17 @@ {% for service_id in slice.slice_service_ids %} <tr> <td> + {% if service_id.context_id.context_uuid.uuid == session['context_uuid'] %} <a href="{{ url_for('service.detail', service_uuid=service_id.service_uuid.uuid) }}"> - {{ service_id.service_uuid.uuid }} + {{ service_id.context_id.context_uuid.uuid }} / {{ service_id.service_uuid.uuid }} <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16"> <path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/> <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/> </svg> </a> + {% else %} + {{ service_id.context_id.context_uuid.uuid }} / {{ service_id.service_uuid.uuid }} + {% endif %} </td> </tr> {% endfor %} @@ -204,13 +210,17 @@ {% for subslice_id in slice.slice_subslice_ids %} <tr> <td> + {% if subslice_id.context_id.context_uuid.uuid == session['context_uuid'] %} <a href="{{ url_for('slice.detail', slice_uuid=subslice_id.slice_uuid.uuid) }}"> - {{ subslice_id.slice_uuid.uuid }} + {{ subslice_id.context_id.context_uuid.uuid }} / {{ subslice_id.slice_uuid.uuid }} <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16"> <path 
d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/> <path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/> </svg> </a> + {% else %} + {{ subslice_id.context_id.context_uuid.uuid }} / {{ subslice_id.slice_uuid.uuid }} + {% endif %} </td> </tr> {% endfor %} diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md index 3b55a0961da78fdc78a8feb31499608589b9d0be..04d585d24cc046e6a1aadc1c93118a1b36855aca 100644 --- a/tutorial/2-2-ofc22.md +++ b/tutorial/2-2-ofc22.md @@ -37,9 +37,6 @@ environment and a TeraFlowSDN controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python environment as described in [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md). -Remember to source the scenario settings, e.g., `cd ~/tfs-ctrl && source ofc22/deploy_specs.sh` in each terminal you open. -Then, re-build the protocol buffers code from the proto files: -`./proto/generate_code_python.sh` ## 2.2.4. Access to the WebUI and Dashboard @@ -55,25 +52,33 @@ Notes: ## 2.2.5. Test execution -Before executing the tests, the environment variables need to be prepared. -First, make sure to load your deployment variables by: +Before executing the tests, we need to prepare a few things. + +First, you need to make sure that you have all the gRPC-generate code in your folder. +To do so, run: ``` -source my_deploy.sh +proto/generate_code_python.sh ``` -Then, you also need to load the environment variables to support the execution of the -tests by: +Then, it is time to deploy TeraFlowSDN with the correct specification for this scenario. +Make sure to load your deployment variables for this scenario by: ``` -source tfs_runtime_env_vars.sh +source ofc22/deploy_specs.sh ``` -You also need to make sure that you have all the gRPC-generate code in your folder. -To do so, run: +Then, you need to deploy the components by running: ``` -proto/generate_code_python.sh +./deploy.sh +``` + +After the deployment is finished, you need to load the environment variables to support +the execution of the tests by: + +``` +source tfs_runtime_env_vars.sh ``` To execute this functional test, four main steps needs to be carried out: @@ -90,8 +95,24 @@ See the troubleshooting section if needed. You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts after you execute each step. +There are two ways to execute the functional tests, *running all the tests with a single script* or *running each test independently*. +In the following we start with the first option, then we comment on how to run each test independently. + + +### 2.2.5.1. Running all tests with a single script + +We have a script that executes all the steps at once. +It is meant for being used to test if all components involved in this scenario are working correct. +To run all the functional tests, you can run: + +``` +ofc22/run_tests_and_coverage.sh +``` + +The following sections explain each one of the steps. -### 2.2.5.1. Device bootstrapping + +### 2.2.5.2. Device bootstrapping This step configures some basic entities (Context and Topology), the devices, and the links in the topology. 
@@ -103,7 +124,11 @@ The expected results are: This step can be run from the WebUI, by uploading the file `./ofc22/tests/descriptors_emulated.json` that contains the descriptors of the contexts, topologies, devices, and links, or by -executing the `./ofc22/run_test_01_bootstrap.sh` script. +executing the script: + +``` +./ofc22/run_test_01_bootstrap.sh +``` When the bootstrapping finishes, check the Grafana L3-Monitoring Dashboard; you should see the monitoring data being plotted and updated every 5 seconds (by default). @@ -117,12 +142,16 @@ Note here that the emulated devices produce synthetic randomly-generated monitoring data and do not represent any particular services configured. -### 2.2.5.2. L3VPN Service creation +### 2.2.5.3. L3VPN Service creation This step configures a new service, emulating the request an OSM WIM would make by means of a Mock OSM instance. -To run this step, execute the `./ofc22/run_test_02_create_service.sh` script. +To run this step, execute the script: + +``` +./ofc22/run_test_02_create_service.sh +``` When the script finishes, check the WebUI *Services* tab. You should see that two services have been created, one for the optical layer and another for the packet layer. @@ -133,13 +162,18 @@ the plots with the monitored data for the device. By default, device R1-EMU is selected. -### 2.2.5.3. L3VPN Service removal +### 2.2.5.4. L3VPN Service removal This step deconfigures the previously created services, emulating the request an OSM WIM would make by means of a Mock OSM instance. -To run this step, execute the `./ofc22/run_test_03_delete_service.sh` script, or delete -the L3NM service from the WebUI. +To run this step, execute the script: + +``` +./ofc22/run_test_03_delete_service.sh +``` + +or delete the L3NM service from the WebUI. When the script finishes, check the WebUI *Services* tab. You should see that the two services have been removed. @@ -149,12 +183,16 @@ In the Grafana Dashboard, given that there is no service configured, you should see the 0-valued flat plot again. -### 2.2.5.4. Cleanup +### 2.2.5.5. Cleanup This last step performs a cleanup of the scenario, removing all the TeraFlowSDN entities for completeness. -To run this step, execute the `./ofc22/run_test_04_cleanup.sh` script. +To run this step, execute the script: + +``` +./ofc22/run_test_04_cleanup.sh +``` When the script finishes, check the WebUI *Devices* tab; you should see that the devices have been removed.
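For reference, the four per-step scripts above can also be chained manually from the repository root; this is a sketch equivalent in spirit to `ofc22/run_tests_and_coverage.sh`, assuming the deployment variables have already been sourced as described in Section 2.2.5:

```
# Run the four functional-test steps back to back (sketch; assumes
# `source ofc22/deploy_specs.sh` and `source tfs_runtime_env_vars.sh`
# were executed in this shell beforehand).
./ofc22/run_test_01_bootstrap.sh
./ofc22/run_test_02_create_service.sh
./ofc22/run_test_03_delete_service.sh
./ofc22/run_test_04_cleanup.sh
```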