From 47b5f3426370c0a5bef279236ba8a5df97746bc8 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Tue, 27 Aug 2024 10:48:29 +0200 Subject: [PATCH 01/36] Add LOG_LEVEL to avoid WARNING on check_service_are_running script --- services/check_services_are_running.sh | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/services/check_services_are_running.sh b/services/check_services_are_running.sh index 16de704..b7e7a7a 100755 --- a/services/check_services_are_running.sh +++ b/services/check_services_are_running.sh @@ -2,9 +2,10 @@ export CAPIF_PRIV_KEY= export CAPIF_PRIV_KEY_BASE_64= export MONITORING= +export LOG_LEVEL=DEBUG -running="$(docker compose -f docker-compose-vault.yml ps --services --all --filter "status=running")" -services="$(docker compose -f docker-compose-vault.yml ps --services --all)" +running="$(LOG_LEVEL=$LOG_LEVEL docker compose -f docker-compose-vault.yml ps --services --all --filter "status=running")" +services="$(LOG_LEVEL=$LOG_LEVEL docker compose -f docker-compose-vault.yml ps --services --all)" if [ "$running" != "$services" ]; then echo "Following Vault services are not running:" # Bash specific @@ -14,8 +15,8 @@ else echo "All Vault services are running" fi -running="$(docker compose -f docker-compose-capif.yml ps --services --all --filter "status=running")" -services="$(docker compose -f docker-compose-capif.yml ps --services --all)" +running="$(LOG_LEVEL=$LOG_LEVEL docker compose -f docker-compose-capif.yml ps --services --all --filter "status=running")" +services="$(LOG_LEVEL=$LOG_LEVEL docker compose -f docker-compose-capif.yml ps --services --all)" if [ "$running" != "$services" ]; then echo "Following CCF services are not running:" # Bash specific @@ -25,8 +26,8 @@ else echo "All CCF services are running" fi -running="$(docker compose -f docker-compose-register.yml ps --services --all --filter "status=running")" -services="$(docker compose -f docker-compose-register.yml ps --services --all)" +running="$(LOG_LEVEL=$LOG_LEVEL docker compose -f docker-compose-register.yml ps --services --all --filter "status=running")" +services="$(LOG_LEVEL=$LOG_LEVEL docker compose -f docker-compose-register.yml ps --services --all)" if [ "$running" != "$services" ]; then echo "Following Register services are not running:" # Bash specific -- GitLab From 9b5447a8ac5f626f066d429247624200ac07cd8b Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 28 Aug 2024 12:40:27 +0200 Subject: [PATCH 02/36] Improved variables.sh and robot scripts --- helm/scripts/create_remote_users.sh | 3 ++- helm/scripts/populate_create_remote_dummy_users.sh | 3 ++- helm/scripts/populate_remove_remote_dummy_users.sh | 3 ++- helm/scripts/remove_remote_users.sh | 3 ++- helm/scripts/run_remote_capif_tests.sh | 3 ++- helm/scripts/variables.sh | 12 ++++++++++++ services/create_users.sh | 3 ++- services/remove_users.sh | 3 ++- services/run_capif_tests.sh | 3 ++- 9 files changed, 28 insertions(+), 8 deletions(-) diff --git a/helm/scripts/create_remote_users.sh b/helm/scripts/create_remote_users.sh index 909fffd..153ea33 100755 --- a/helm/scripts/create_remote_users.sh +++ b/helm/scripts/create_remote_users.sh @@ -56,7 +56,7 @@ then fi # Other Stuff -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 TEST_FOLDER=$CAPIF_BASE_DIR/tests @@ -98,6 +98,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image 
($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? -ne 0 ]] then diff --git a/helm/scripts/populate_create_remote_dummy_users.sh b/helm/scripts/populate_create_remote_dummy_users.sh index d2e137d..1dc89b1 100755 --- a/helm/scripts/populate_create_remote_dummy_users.sh +++ b/helm/scripts/populate_create_remote_dummy_users.sh @@ -37,7 +37,7 @@ while getopts ":p:i:h" opt; do done # Other Stuff -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 TEST_FOLDER=$CAPIF_BASE_DIR/tests @@ -80,6 +80,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? -ne 0 ]] then diff --git a/helm/scripts/populate_remove_remote_dummy_users.sh b/helm/scripts/populate_remove_remote_dummy_users.sh index 1a22319..3bcde32 100755 --- a/helm/scripts/populate_remove_remote_dummy_users.sh +++ b/helm/scripts/populate_remove_remote_dummy_users.sh @@ -26,7 +26,7 @@ while getopts ":h" opt; do done # Other Stuff -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 TEST_FOLDER=$CAPIF_BASE_DIR/tests @@ -69,6 +69,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? -ne 0 ]] then diff --git a/helm/scripts/remove_remote_users.sh b/helm/scripts/remove_remote_users.sh index 1891fec..98f3f3f 100755 --- a/helm/scripts/remove_remote_users.sh +++ b/helm/scripts/remove_remote_users.sh @@ -39,7 +39,7 @@ then fi # Other Stuff -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 TEST_FOLDER=$CAPIF_BASE_DIR/tests @@ -81,6 +81,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? -ne 0 ]] then diff --git a/helm/scripts/run_remote_capif_tests.sh b/helm/scripts/run_remote_capif_tests.sh index 0095300..37bba19 100755 --- a/helm/scripts/run_remote_capif_tests.sh +++ b/helm/scripts/run_remote_capif_tests.sh @@ -1,7 +1,7 @@ #!/bin/bash source $(dirname "$(readlink -f "$0")")/variables.sh -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 TEST_FOLDER=$CAPIF_BASE_DIR/tests @@ -39,6 +39,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? 
-ne 0 ]] then diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 2a9b5a8..d927b82 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -1,9 +1,21 @@ #!/bin/bash +# Store KUBECONFIG if it's set +export DEFAULT_KUBECONFIG=$KUBECONFIG +if [ -z "$DEFAULT_KUBECONFIG" ]; then + echo "No default value loaded on KUBECONFIG variable." +else + echo "KUBECONFIG has a default value $DEFAULT_KUBECONFIG. If variables in this file doesn't have a custom value this default will be used." +fi + # Use custom kubeconfig. If you set here the path to a kubeconfig file it will be used in installation/uninstallation scripts export KUBECONFIG="" if [ -z "$KUBECONFIG" ]; then echo "The variable KUBECONFIG is empty. Using default k8s environment..." + if [ -n "$DEFAULT_KUBECONFIG" ]; then + KUBECONFIG="--kubeconfig $DEFAULT_KUBECONFIG" + echo "Using DEFAULT_KUBECONFIG $DEFAULT_KUBECONFIG" + fi else KUBECONFIG="--kubeconfig $KUBECONFIG" echo "The variable KUBECONFIG is not empty. Its value is: $KUBECONFIG" diff --git a/services/create_users.sh b/services/create_users.sh index 3dd07c3..14066a8 100755 --- a/services/create_users.sh +++ b/services/create_users.sh @@ -60,7 +60,7 @@ then fi # Other Stuff -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 cd .. REPOSITORY_BASE_FOLDER=${PWD} @@ -105,6 +105,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? -ne 0 ]] then diff --git a/services/remove_users.sh b/services/remove_users.sh index c9f63a0..5325490 100755 --- a/services/remove_users.sh +++ b/services/remove_users.sh @@ -43,7 +43,7 @@ then fi # Other Stuff -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 cd .. REPOSITORY_BASE_FOLDER=${PWD} @@ -88,6 +88,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? -ne 0 ]] then diff --git a/services/run_capif_tests.sh b/services/run_capif_tests.sh index 29bb62e..5f1a2b2 100755 --- a/services/run_capif_tests.sh +++ b/services/run_capif_tests.sh @@ -1,6 +1,6 @@ #!/bin/bash -DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image DOCKER_ROBOT_IMAGE_VERSION=1.0 cd .. REPOSITORY_BASE_FOLDER=${PWD} @@ -41,6 +41,7 @@ then exit -1 fi +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' if [[ $? 
-ne 0 ]] then -- GitLab From ac4a328302962aea465cb2304596ce028193edd8 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 28 Aug 2024 12:46:46 +0200 Subject: [PATCH 03/36] Improved way to get KUBECONFIG --- helm/scripts/variables.sh | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index d927b82..e03a3fc 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -1,24 +1,16 @@ #!/bin/bash -# Store KUBECONFIG if it's set -export DEFAULT_KUBECONFIG=$KUBECONFIG -if [ -z "$DEFAULT_KUBECONFIG" ]; then - echo "No default value loaded on KUBECONFIG variable." -else - echo "KUBECONFIG has a default value $DEFAULT_KUBECONFIG. If variables in this file doesn't have a custom value this default will be used." -fi - # Use custom kubeconfig. If you set here the path to a kubeconfig file it will be used in installation/uninstallation scripts -export KUBECONFIG="" -if [ -z "$KUBECONFIG" ]; then +export CUSTOM_KUBECONFIG="" +if [ -z "$CUSTOM_KUBECONFIG" ]; then echo "The variable KUBECONFIG is empty. Using default k8s environment..." - if [ -n "$DEFAULT_KUBECONFIG" ]; then - KUBECONFIG="--kubeconfig $DEFAULT_KUBECONFIG" - echo "Using DEFAULT_KUBECONFIG $DEFAULT_KUBECONFIG" + if [ -n "$KUBECONFIG" ]; then + CUSTOM_KUBECONFIG="--kubeconfig $KUBECONFIG" + echo "Using DEFAULT_KUBECONFIG $CUSTOM_KUBECONFIG" fi else - KUBECONFIG="--kubeconfig $KUBECONFIG" - echo "The variable KUBECONFIG is not empty. Its value is: $KUBECONFIG" + CUSTOM_KUBECONFIG="--kubeconfig $CUSTOM_KUBECONFIG" + echo "The variable CUSTOM_KUBECONFIG is not empty. Its value is: $CUSTOM_KUBECONFIG" fi # timestap to use along scripts @@ -105,3 +97,5 @@ elif [ -z "$K8S_IP" ]; then K8S_IP=$(kubectl $KUBECONFIG get svc -A | grep ingress-nginx-controller | awk '/NodePort/{ print $4 }') echo "K8S_IP value will be $K8S_IP" fi + +export KUBECONFIG=$CUSTOM_KUBECONFIG \ No newline at end of file -- GitLab From 9357961da82b922311fa36b06f69f36c78a6adb2 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 28 Aug 2024 12:54:30 +0200 Subject: [PATCH 04/36] Added Storage class to variables.sh --- helm/scripts/install_capif.sh | 4 ++++ helm/scripts/variables.sh | 2 ++ 2 files changed, 6 insertions(+) diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index bbf4b42..d012b66 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -15,8 +15,10 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set grafana.ingress.hosts[0].paths[0].pathType="Prefix" \ --set grafana.env.prometheusUrl=$PROMETHEUS_URL \ --set grafana.env.tempoUrl="http://$CAPIF_NAME_VERSION_CHART-tempo:3100" \ +--set grafana.persistence.storageClass=$STORAGE_CLASS \ --set fluentbit.enabled=true \ --set loki.enabled=true \ +--set loki.persistence.storageClass=$STORAGE_CLASS \ --set tempo.tempo.metricsGenerator.remoteWriteUrl=$PROMETHEUS_URL/api/v1/write \ --set otelcollector.enabled=true \ --set otelcollector.configMap.tempoEndpoint=$CAPIF_NAME_VERSION_CHART-tempo:4317 \ @@ -116,11 +118,13 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mock-server.ingress.hosts[0].paths[0].path="/" \ --set mock-server.ingress.hosts[0].paths[0].pathType="Prefix" \ --set mock-server.env.logLevel="DEBUG" \ +--set mongo-register.persistence.storageClass=$STORAGE_CLASS \ --set mongo-register-express.enabled=true \ --set 
mongo-register-express.ingress.enabled=true \ --set mongo-register-express.ingress.hosts[0].host="mongo-express-register-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-register-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-register-express.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set mongo.persistence.storageClass=$STORAGE_CLASS \ --set mongo-express.enabled=true \ --set mongo-express.ingress.enabled=true \ --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index e03a3fc..0ba57bc 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -57,6 +57,8 @@ export MONITORING_NAMESPACE=monitoring export MONITORING_SERVICE_NAME=monitoring # OpenCAPIF deployment variables +## Storage Class +export STORAGE_CLASS="nfs-01" ## Register and Capif hostname to be deployed export CAPIF_HOSTNAME="capif.testbed.develop" export REGISTER_HOSTNAME="register.testbed.develop" -- GitLab From 6640ad66fbae7ce4648f86186f15ceaad6050e60 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 28 Aug 2024 12:57:30 +0200 Subject: [PATCH 05/36] Fix minor issue with KUBECONFIG --- helm/scripts/variables.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 0ba57bc..600afd8 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -92,6 +92,9 @@ else fi echo "Using value on VAULT_ACCESS_TOKEN=$VAULT_ACCESS_TOKEN" +## Setup KUBECONFIG +export KUBECONFIG=$CUSTOM_KUBECONFIG + ### If K8S_IP is empty, then script will try to get ingress-nginx-controller NodePort to grant DNS resolution for register to connect locally to CAPIF nginx if [ "$K8S_IP" == "NONE" ]; then echo "K8S_IP value is NONE. 
Register service will not have local DNS resolution" @@ -100,4 +103,3 @@ elif [ -z "$K8S_IP" ]; then echo "K8S_IP value will be $K8S_IP" fi -export KUBECONFIG=$CUSTOM_KUBECONFIG \ No newline at end of file -- GitLab From eab30794b5d9b8c9f44c29b6c2d9defc3e407aa6 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 28 Aug 2024 15:15:42 +0200 Subject: [PATCH 06/36] Added resource limit configuration --- helm/scripts/install_capif.sh | 2 +- helm/scripts/variables.sh | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index d012b66..f2bf13e 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -130,4 +130,4 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-express.ingress.hosts[0].paths[0].pathType="Prefix" \ ---wait --timeout=10m --create-namespace --atomic +--wait --timeout=10m --create-namespace --atomic $RESOURCES_LIMITS diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 600afd8..4d3b1a8 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -57,6 +57,7 @@ export MONITORING_NAMESPACE=monitoring export MONITORING_SERVICE_NAME=monitoring # OpenCAPIF deployment variables +export RESOURCES_LIMITS="NONE" ## Storage Class export STORAGE_CLASS="nfs-01" ## Register and Capif hostname to be deployed @@ -103,3 +104,23 @@ elif [ -z "$K8S_IP" ]; then echo "K8S_IP value will be $K8S_IP" fi +if [ "$RESOURCES_LIMITS" == "NONE" ]; then + RESOURCES_LIMITS="--set mongo-express.resources= + --set mongo-register-express.resources= + --set nginx.resources= + --set ocf-access-control-policy.resources= + --set ocf-api-invocation-logs.resources= + --set ocf-api-invoker-management.resources= + --set ocf-api-provider-management.resources= + --set mongo-express.resources= + --set ocf-auditing-api-logs.resources= + --set ocf-discover-service-api.resources= + --set ocf-events.resources= + --set ocf-publish-service-api.resources= + --set ocf-register.resources= + --set ocf-routing-info.resources= + --set ocf-security.resources= + --set redis.resources= " +else + RESOURCES_LIMITS="" +fi \ No newline at end of file -- GitLab From 8ac001e484e6cc288a0ac1b04b5ef826214e908e Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 29 Aug 2024 11:01:31 +0200 Subject: [PATCH 07/36] Added more customizable Storage parameters --- helm/capif/charts/grafana/templates/pvc.yaml | 3 +- helm/capif/charts/grafana/values.yaml | 2 + helm/capif/charts/loki/templates/pvc.yaml | 3 +- helm/capif/charts/loki/values.yaml | 2 + .../charts/mongo-register/templates/pvc.yaml | 3 +- helm/capif/charts/mongo-register/values.yaml | 2 + helm/capif/charts/mongo/templates/pvc.yaml | 3 +- helm/capif/charts/mongo/values.yaml | 2 + .../charts/prometheus/templates/pvc.yaml | 3 +- .../charts/prometheus/values.yaml | 2 + helm/scripts/install_capif.sh | 14 +++-- helm/scripts/install_vault.sh | 2 + helm/scripts/variables.sh | 54 +++++++++++-------- 13 files changed, 59 insertions(+), 36 deletions(-) diff --git a/helm/capif/charts/grafana/templates/pvc.yaml b/helm/capif/charts/grafana/templates/pvc.yaml index 7aa2b72..b6be2b4 100644 --- a/helm/capif/charts/grafana/templates/pvc.yaml +++ b/helm/capif/charts/grafana/templates/pvc.yaml @@ -8,8 +8,7 @@ metadata: name: grafana-claim0 
spec: storageClassName: {{ .Values.persistence.storageClass }} - accessModes: - - ReadWriteMany + accessModes: {{ .Values.persistence.accessModes }} resources: requests: storage: {{ .Values.persistence.storage }} diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index 8391800..f081cc7 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -52,6 +52,8 @@ persistence: enable: true storage: 10Gi storageClass: nfs-01 + accessModes: + - ReadWriteMany service: type: ClusterIP diff --git a/helm/capif/charts/loki/templates/pvc.yaml b/helm/capif/charts/loki/templates/pvc.yaml index c6594b2..028cbe6 100644 --- a/helm/capif/charts/loki/templates/pvc.yaml +++ b/helm/capif/charts/loki/templates/pvc.yaml @@ -7,8 +7,7 @@ metadata: name: loki-claim0 spec: storageClassName: {{ .Values.persistence.storageClass }} - accessModes: - - ReadWriteMany + accessModes: {{ .Values.persistence.accessModes }} resources: requests: storage: {{ .Values.persistence.storage }} diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index 444311d..f3c1c07 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -45,6 +45,8 @@ persistence: enable: true storage: 100Mi storageClass: nfs-01 + accessModes: + - ReadWriteMany service: type: ClusterIP diff --git a/helm/capif/charts/mongo-register/templates/pvc.yaml b/helm/capif/charts/mongo-register/templates/pvc.yaml index 13f1733..b3477fd 100644 --- a/helm/capif/charts/mongo-register/templates/pvc.yaml +++ b/helm/capif/charts/mongo-register/templates/pvc.yaml @@ -6,8 +6,7 @@ metadata: name: mongo-register-pvc spec: storageClassName: {{ .Values.persistence.storageClass }} - accessModes: - - ReadWriteMany + accessModes: {{ .Values.persistence.accessModes }} resources: requests: storage: {{ .Values.persistence.storage }} \ No newline at end of file diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index dcb783f..9b2da60 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -94,6 +94,8 @@ autoscaling: persistence: storage: 8Gi storageClass: nfs-01 + accessModes: + - ReadWriteMany # Additional volumes on the output Deployment definition. volumes: diff --git a/helm/capif/charts/mongo/templates/pvc.yaml b/helm/capif/charts/mongo/templates/pvc.yaml index c0ceafd..876d9e3 100644 --- a/helm/capif/charts/mongo/templates/pvc.yaml +++ b/helm/capif/charts/mongo/templates/pvc.yaml @@ -6,8 +6,7 @@ metadata: name: mongo-pvc spec: storageClassName: {{ .Values.persistence.storageClass }} - accessModes: - - ReadWriteMany + accessModes: {{ .Values.persistence.accessModes }} resources: requests: storage: {{ .Values.persistence.storage }} \ No newline at end of file diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index 38e4b9b..5569bc4 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -97,6 +97,8 @@ autoscaling: persistence: storage: 8Gi storageClass: nfs-01 + accessModes: + - ReadWriteMany # Additional volumes on the output Deployment definition. 
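
For reference, the persistence values parametrised in this patch can also be overridden at install time without touching the charts. A minimal, hedged sketch (release name, namespace, class and sizes below are placeholders, not values taken from this repository):

    # Hypothetical override of the persistence values shown above.
    helm upgrade --install ocf-capif ./helm/capif -n ocf-capif \
      --set mongo.persistence.storageClass=nfs-01 \
      --set mongo.persistence.storage=8Gi \
      --set mongo.persistence.accessModes[0]=ReadWriteOnce \
      --set grafana.persistence.storageClass=nfs-01 \
      --set grafana.persistence.accessModes[0]=ReadWriteOnce \
      --create-namespace
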
volumes: diff --git a/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml b/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml index d9c2dbe..43ee6ec 100644 --- a/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml +++ b/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml @@ -6,8 +6,7 @@ metadata: labels: {{- include "prometheus.labels" . | nindent 4 }} spec: - accessModes: - - ReadWriteOnce + accessModes: {{ .Values.persistence.accessModes }} resources: requests: storage: {{ .Values.persistence.storage }} diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index 1083bd8..5539d1c 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -43,6 +43,8 @@ securityContext: {} persistence: enable: false storage: 10Gi + accessModes: + - ReadWriteMany service: type: NodePort diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index f2bf13e..f701421 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -15,10 +15,12 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set grafana.ingress.hosts[0].paths[0].pathType="Prefix" \ --set grafana.env.prometheusUrl=$PROMETHEUS_URL \ --set grafana.env.tempoUrl="http://$CAPIF_NAME_VERSION_CHART-tempo:3100" \ ---set grafana.persistence.storageClass=$STORAGE_CLASS \ +--set grafana.persistence.storageClass=$CAPIF_STORAGE_CLASS \ +--set grafana.persistence.storage=$CAPIF_GRAFANA_STORAGE_SIZE \ --set fluentbit.enabled=true \ --set loki.enabled=true \ ---set loki.persistence.storageClass=$STORAGE_CLASS \ +--set loki.persistence.storageClass=$CAPIF_STORAGE_CLASS \ +--set loki.persistence.storage=$CAPIF_LOKI_STORAGE_SIZE \ --set tempo.tempo.metricsGenerator.remoteWriteUrl=$PROMETHEUS_URL/api/v1/write \ --set otelcollector.enabled=true \ --set otelcollector.configMap.tempoEndpoint=$CAPIF_NAME_VERSION_CHART-tempo:4317 \ @@ -118,16 +120,18 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mock-server.ingress.hosts[0].paths[0].path="/" \ --set mock-server.ingress.hosts[0].paths[0].pathType="Prefix" \ --set mock-server.env.logLevel="DEBUG" \ ---set mongo-register.persistence.storageClass=$STORAGE_CLASS \ +--set mongo-register.persistence.storageClass=$CAPIF_STORAGE_CLASS \ +--set mongo-register.persistence.storage=$CAPIF_MONGO_REGISTER_STORAGE_SIZE \ --set mongo-register-express.enabled=true \ --set mongo-register-express.ingress.enabled=true \ --set mongo-register-express.ingress.hosts[0].host="mongo-express-register-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-register-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-register-express.ingress.hosts[0].paths[0].pathType="Prefix" \ ---set mongo.persistence.storageClass=$STORAGE_CLASS \ +--set mongo.persistence.storageClass=$CAPIF_STORAGE_CLASS \ +--set mongo.persistence.storage=$CAPIF_MONGO_STORAGE_SIZE \ --set mongo-express.enabled=true \ --set mongo-express.ingress.enabled=true \ --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-express.ingress.hosts[0].paths[0].pathType="Prefix" \ ---wait --timeout=10m --create-namespace --atomic $RESOURCES_LIMITS +--wait --timeout=10m --create-namespace --atomic $RESOURCES_LIMITS $CAPIF_STORAGE_ACCESS_MODE diff --git a/helm/scripts/install_vault.sh 
b/helm/scripts/install_vault.sh index c06e560..03d2e7c 100755 --- a/helm/scripts/install_vault.sh +++ b/helm/scripts/install_vault.sh @@ -37,6 +37,8 @@ helm $KUBECONFIG repo add hashicorp https://helm.releases.hashicorp.com helm $KUBECONFIG upgrade --install vault hashicorp/vault -n $VAULT_NAMESPACE --set server.ingress.enabled=true \ --set server.ingress.hosts[0].host="$VAULT_HOSTNAME" \ --set server.ingress.ingressClassName=nginx \ +--set server.dataStorage.storageClass=$VAULT_STORAGE_CLASS \ +--set server.dataStorage.size=$VAULT_STORAGE_SIZE \ --set server.standalone.enabled=true --create-namespace # Loop to wait until the service is in "Running" state and has 0/1 ready replicas diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 4d3b1a8..645ab55 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -38,15 +38,15 @@ export VAULT_HOSTNAME=vault.testbed.develop export VAULT_NAMESPACE=ocf-vault export VAULT_SERVICE_NAME='vault' export LABEL_TO_CHECK="app.kubernetes.io/name" - ## File to store key and token export VAULT_FILE="$HELM_DIR/vault_keys.txt" - ## Vault domains to be included export DOMAIN1=*.testbed.pre-production export DOMAIN2=*.testbed.validation export DOMAIN3=*.testbed.develop - +## Vault Storage Configuration +export VAULT_STORAGE_CLASS=longhorn +export VAULT_STORAGE_SIZE=10Gi ## Vault configuration job VAULT_JOB_NAME=vault-pki @@ -59,7 +59,12 @@ export MONITORING_SERVICE_NAME=monitoring # OpenCAPIF deployment variables export RESOURCES_LIMITS="NONE" ## Storage Class -export STORAGE_CLASS="nfs-01" +export CAPIF_STORAGE_CLASS="nfs-01" +export CAPIF_STORAGE_ACCESS_MODE="ReadWriteOnce" +export CAPIF_GRAFANA_STORAGE_SIZE=10Gi +export CAPIF_LOKI_STORAGE_SIZE=100Mi +export CAPIF_MONGO_STORAGE_SIZE=8Gi +export CAPIF_MONGO_REGISTER_STORAGE_SIZE=8Gi ## Register and Capif hostname to be deployed export CAPIF_HOSTNAME="capif.testbed.develop" export REGISTER_HOSTNAME="register.testbed.develop" @@ -105,22 +110,29 @@ elif [ -z "$K8S_IP" ]; then fi if [ "$RESOURCES_LIMITS" == "NONE" ]; then - RESOURCES_LIMITS="--set mongo-express.resources= - --set mongo-register-express.resources= - --set nginx.resources= - --set ocf-access-control-policy.resources= - --set ocf-api-invocation-logs.resources= - --set ocf-api-invoker-management.resources= - --set ocf-api-provider-management.resources= - --set mongo-express.resources= - --set ocf-auditing-api-logs.resources= - --set ocf-discover-service-api.resources= - --set ocf-events.resources= - --set ocf-publish-service-api.resources= - --set ocf-register.resources= - --set ocf-routing-info.resources= - --set ocf-security.resources= - --set redis.resources= " + RESOURCES_LIMITS="--set mongo-express.resources=null + --set mongo-register-express.resources=null + --set nginx.resources=null + --set ocf-access-control-policy.resources=null + --set ocf-api-invocation-logs.resources=null + --set ocf-api-invoker-management.resources=null + --set ocf-api-provider-management.resources=null + --set mongo-express.resources=null + --set ocf-auditing-api-logs.resources=null + --set ocf-discover-service-api.resources=null + --set ocf-events.resources=null + --set ocf-publish-service-api.resources=null + --set ocf-register.resources=null + --set ocf-routing-info.resources=null + --set ocf-security.resources=null + --set redis.resources=null " else RESOURCES_LIMITS="" -fi \ No newline at end of file +fi + +if [ -n "$CAPIF_STORAGE_ACCESS_MODE" ]; then + CAPIF_STORAGE_ACCESS_MODE="--set 
mongo.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE + --set mongo-register.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE + --set loki.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE + --set grafana.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE + " \ No newline at end of file -- GitLab From bc17f725c5baaf78032ef4f49950b748c588e4ee Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 29 Aug 2024 11:02:28 +0200 Subject: [PATCH 08/36] Setup consistent default value to nfs-01 --- helm/scripts/variables.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 645ab55..962e7cf 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -45,7 +45,7 @@ export DOMAIN1=*.testbed.pre-production export DOMAIN2=*.testbed.validation export DOMAIN3=*.testbed.develop ## Vault Storage Configuration -export VAULT_STORAGE_CLASS=longhorn +export VAULT_STORAGE_CLASS=nfs-01 export VAULT_STORAGE_SIZE=10Gi ## Vault configuration job VAULT_JOB_NAME=vault-pki @@ -59,7 +59,7 @@ export MONITORING_SERVICE_NAME=monitoring # OpenCAPIF deployment variables export RESOURCES_LIMITS="NONE" ## Storage Class -export CAPIF_STORAGE_CLASS="nfs-01" +export CAPIF_STORAGE_CLASS=nfs-01 export CAPIF_STORAGE_ACCESS_MODE="ReadWriteOnce" export CAPIF_GRAFANA_STORAGE_SIZE=10Gi export CAPIF_LOKI_STORAGE_SIZE=100Mi -- GitLab From b65c66154e0db060cd5a2b3129821a9060091509 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 29 Aug 2024 11:11:25 +0200 Subject: [PATCH 09/36] fix minor error in variables.sh --- helm/scripts/variables.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 962e7cf..3455cff 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -135,4 +135,5 @@ if [ -n "$CAPIF_STORAGE_ACCESS_MODE" ]; then --set mongo-register.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE --set loki.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE --set grafana.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE - " \ No newline at end of file + " +fi \ No newline at end of file -- GitLab From 3bea50e56c4f4fb2ebd69e614270789089e55dd0 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 29 Aug 2024 12:17:01 +0200 Subject: [PATCH 10/36] Setup limits --- helm/capif/charts/mongo-express/values.yaml | 14 ++--- .../charts/mongo-register-express/values.yaml | 14 ++--- helm/capif/charts/nginx/values.yaml | 14 ++--- .../ocf-access-control-policy/values.yaml | 14 ++--- .../ocf-api-invocation-logs/values.yaml | 14 ++--- .../ocf-api-invoker-management/values.yaml | 14 ++--- .../ocf-api-provider-management/values.yaml | 14 ++--- .../charts/ocf-auditing-api-logs/values.yaml | 14 ++--- .../ocf-discover-service-api/values.yaml | 14 ++--- helm/capif/charts/ocf-events/values.yaml | 14 ++--- .../ocf-publish-service-api/values.yaml | 14 ++--- helm/capif/charts/ocf-register/values.yaml | 14 ++--- .../capif/charts/ocf-routing-info/values.yaml | 14 ++--- helm/capif/charts/ocf-security/values.yaml | 14 ++--- helm/capif/charts/redis/values.yaml | 14 ++--- helm/scripts/install_capif.sh | 2 +- helm/scripts/variables.sh | 59 +++++++++++++------ 17 files changed, 146 insertions(+), 125 deletions(-) diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index 36f3a8b..71ad108 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ 
b/helm/capif/charts/mongo-express/values.yaml @@ -66,17 +66,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index dd225f5..d9d26e9 100644 --- a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -67,17 +67,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index db0541e..8d5ac3f 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -74,17 +74,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index 9184d26..472e509 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
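
A side note on how these emptied defaults interact with the helper scripts: Helm can also drop a chart default entirely from the command line by setting it to null, which is what the earlier variables.sh revision relies on when it passes resources=null. A hedged example (release and chart names are placeholders):

    # "null" removes the key during value coalescing, so the container runs
    # without explicit requests/limits, matching the empty "resources: {}" default.
    helm upgrade --install ocf-capif ./helm/capif -n ocf-capif \
      --set redis.resources=null \
      --set nginx.resources=null
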
- limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index dc63d4b..e9b970f 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index e832c7d..c6b0c09 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -71,17 +71,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 547bb05..2160a02 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -71,17 +71,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index 859ba12..2596b4c 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml b/helm/capif/charts/ocf-discover-service-api/values.yaml index 6aa8e61..8581434 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index b3ca6b0..64e075e 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index ac32a98..2f8d3a9 100644 --- a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index ffa5d50..e26b36d 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -76,17 +76,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index d6c6a3d..c409c93 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index 2be4288..aa0aa7a 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index 4011e97..bc1897d 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -62,17 +62,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
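
The per-service overrides generated later in this patch (the capif_services loop added to variables.sh) are meant to accumulate one block of --set flags per service. As written there, the plain assignment inside the loop replaces the previous value on each iteration, so only the last service in the list keeps its limits; appending preserves them all. A minimal sketch of the accumulating variant, reusing the variable names defined in variables.sh:

    CAPIF_RESOURCES=""
    for service in "${capif_services[@]}"; do
        # Append, rather than assign, so every service keeps its overrides.
        CAPIF_RESOURCES+=" --set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU"
        CAPIF_RESOURCES+=" --set $service.resources.limits.memory=$CAPIF_RESOURCES_LIMITS_MEMORY"
        CAPIF_RESOURCES+=" --set $service.resources.requests.cpu=$CAPIF_RESOURCES_REQUESTS_CPU"
        CAPIF_RESOURCES+=" --set $service.resources.requests.memory=$CAPIF_RESOURCES_REQUESTS_MEMORY"
    done
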
- limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index f701421..335ee6d 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -134,4 +134,4 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-express.ingress.hosts[0].paths[0].pathType="Prefix" \ ---wait --timeout=10m --create-namespace --atomic $RESOURCES_LIMITS $CAPIF_STORAGE_ACCESS_MODE +--wait --timeout=10m --create-namespace --atomic $CAPIF_RESOURCES $CAPIF_STORAGE_ACCESS_MODE diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 3455cff..d6ab020 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -57,7 +57,11 @@ export MONITORING_NAMESPACE=monitoring export MONITORING_SERVICE_NAME=monitoring # OpenCAPIF deployment variables -export RESOURCES_LIMITS="NONE" +export CAPIF_RESOURCES="NONE" +export CAPIF_RESOURCES_LIMITS_CPU=100m +export CAPIF_RESOURCES_LIMITS_MEMORY=128Mi +export CAPIF_RESOURCES_REQUESTS_CPU=100m +export CAPIF_RESOURCES_REQUESTS_MEMORY=128Mi ## Storage Class export CAPIF_STORAGE_CLASS=nfs-01 export CAPIF_STORAGE_ACCESS_MODE="ReadWriteOnce" @@ -89,6 +93,7 @@ export VAULT_INTERNAL_HOSTNAME="$VAULT_SERVICE_NAME.$VAULT_NAMESPACE.svc.cluster export VAULT_PORT="8200" export VAULT_ACCESS_TOKEN="dev-only-token" +######### POST PROCESSING VARIABLES SET ######## ### To deploy in other environment we need to setup urls according to it and also using specific kubeconfig: if [ -f "$VAULT_FILE" ] && [ -s "$VAULT_FILE" ]; then VAULT_ACCESS_TOKEN=$(awk '/Initial Root Token/{ print $4 }' $VAULT_FILE) @@ -109,25 +114,41 @@ elif [ -z "$K8S_IP" ]; then echo "K8S_IP value will be $K8S_IP" fi -if [ "$RESOURCES_LIMITS" == "NONE" ]; then - RESOURCES_LIMITS="--set mongo-express.resources=null - --set mongo-register-express.resources=null - --set nginx.resources=null - --set ocf-access-control-policy.resources=null - --set ocf-api-invocation-logs.resources=null - --set ocf-api-invoker-management.resources=null - --set ocf-api-provider-management.resources=null - --set mongo-express.resources=null - --set ocf-auditing-api-logs.resources=null - --set ocf-discover-service-api.resources=null - --set ocf-events.resources=null - --set ocf-publish-service-api.resources=null - --set ocf-register.resources=null - --set ocf-routing-info.resources=null - --set ocf-security.resources=null - --set redis.resources=null " +capif_services=("fluentbit" +"grafana" +"loki" +"mock-server" +"mongo" +"mongo-express" +"mongo-register" +"mongo-register-express" +"nginx" +"ocf-access-control-policy" +"ocf-api-invocation-logs" +"ocf-api-invoker-management" +"ocf-api-provider-management" +"ocf-auditing-api-logs" +"ocf-discover-service-api" +"ocf-events" +"ocf-helper" +"ocf-publish-service-api" +"ocf-register" +"ocf-routing-info" +"ocf-security" +"otelcollector" +"redis" +"renderer") + +if [ "$CAPIF_RESOURCES" == "NONE" ]; then + echo "No Limits will be requested on deployment" + CAPIF_RESOURCES="" else - RESOURCES_LIMITS="" + for service in "${capif_services[@]}"; do + CAPIF_RESOURCES="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU + --set 
$service.resources.limits.memory=$CAPIF_RESOURCES_LIMITS_MEMORY + --set $service.resources.requests.cpu=$CAPIF_RESOURCES_REQUESTS_CPU + --set $service.resources.requests.memory=$CAPIF_RESOURCES_REQUESTS_MEMORY " + done fi if [ -n "$CAPIF_STORAGE_ACCESS_MODE" ]; then -- GitLab From 3b9901418522b3a752301a63bd206ae107f7c9a8 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 29 Aug 2024 13:38:53 +0200 Subject: [PATCH 11/36] Add grafana repository --- helm/scripts/install_capif.sh | 2 ++ helm/scripts/install_monitoring.sh | 1 + 2 files changed, 3 insertions(+) diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index 335ee6d..6a0333a 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -1,6 +1,8 @@ #!/bin/bash source $(dirname "$(readlink -f "$0")")/variables.sh +helm repo add grafana https://grafana.github.io/helm-charts + ### download dependencies helm $KUBECONFIG dependency build $HELM_DIR/capif/ diff --git a/helm/scripts/install_monitoring.sh b/helm/scripts/install_monitoring.sh index f881f0a..80b608b 100755 --- a/helm/scripts/install_monitoring.sh +++ b/helm/scripts/install_monitoring.sh @@ -2,6 +2,7 @@ source $(dirname "$(readlink -f "$0")")/variables.sh helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo add grafana https://grafana.github.io/helm-charts helm $KUBECONFIG dependency build $HELM_DIR/monitoring-stack/ -- GitLab From c7ef433377b5cadd7c0674b3cc593eb2c486d836 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Fri, 30 Aug 2024 08:32:59 +0200 Subject: [PATCH 12/36] Show logs updated --- services/show_logs.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/services/show_logs.sh b/services/show_logs.sh index c2bcf52..a5b1e04 100755 --- a/services/show_logs.sh +++ b/services/show_logs.sh @@ -13,6 +13,10 @@ help() { exit 1 } +MONITORING_STATE=false +LOG_LEVEL=DEBUG +CAPIF_PRIV_KEY_BASE_64=$(echo "$(cat nginx/certs/server.key)") + if [[ $# -lt 1 ]] then echo "You must specify an option before run script." 
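
The inline assignments added to show_logs.sh follow the same idea as the LOG_LEVEL export in check_services_are_running.sh: every variable referenced by the compose files is given a value on the command line, so docker compose no longer warns about unset variables. A short illustration of the pattern (compose file name and values are examples only):

    # Supplying the variables inline avoids warnings about unset
    # compose variables while listing services.
    MONITORING=false LOG_LEVEL=DEBUG \
        docker compose -f docker-compose-capif.yml ps --services --all
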
@@ -82,5 +86,5 @@ else help fi -docker compose ${FILES[@]} logs ${FOLLOW} +MONITORING=$MONITORING_STATE LOG_LEVEL=$LOG_LEVEL CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 docker compose ${FILES[@]} logs ${FOLLOW} -- GitLab From cc9c6eee493c7b31e05961ffc7250111b7f2f96b Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Mon, 2 Sep 2024 14:25:25 +0200 Subject: [PATCH 13/36] Added missed configuration on grafana deployment --- helm/capif/charts/grafana/templates/deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/helm/capif/charts/grafana/templates/deployment.yaml b/helm/capif/charts/grafana/templates/deployment.yaml index d74241f..c2e1a6e 100644 --- a/helm/capif/charts/grafana/templates/deployment.yaml +++ b/helm/capif/charts/grafana/templates/deployment.yaml @@ -36,6 +36,8 @@ spec: value: {{ quote .Values.env.gfSecurityAllowEmbedding }} - name: GF_PATHS_PROVISIONING value: /etc/grafana/provisioning + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} name: {{ .Chart.Name }} -- GitLab From 0f769206b4d0639ec905a27d9a8b00bfb272b200 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Mon, 2 Sep 2024 14:48:37 +0200 Subject: [PATCH 14/36] Added specific configurations for longhorn --- helm/scripts/install_capif.sh | 2 +- helm/scripts/variables.sh | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index 6a0333a..e106db8 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -136,4 +136,4 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-express.ingress.hosts[0].paths[0].pathType="Prefix" \ ---wait --timeout=10m --create-namespace --atomic $CAPIF_RESOURCES $CAPIF_STORAGE_ACCESS_MODE +--wait --timeout=10m --create-namespace --atomic $CAPIF_RESOURCES $CAPIF_STORAGE_ACCESS_MODE $CAPIF_RUN_AS_USER_CONFIG diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index d6ab020..56ab49d 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -157,4 +157,12 @@ if [ -n "$CAPIF_STORAGE_ACCESS_MODE" ]; then --set loki.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE --set grafana.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE " +fi + +export CAPIF_RUN_AS_USER_CONFIG="" +if [ "$CAPIF_STORAGE_CLASS" == "longhorn" ]; then + echo "$CAPIF_STORAGE_CLASS needs to configure runAsUser at mongo, mongo-register and grafana to 0, in order to allow write con PVC created." 
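
Per the message above, when the selected storage class is longhorn the stateful pods are forced to run as root so they can write to the provisioned volume; the flags built on the next lines feed the charts' securityContext values (consumed, for grafana, by the deployment template change earlier in this series). A hedged sketch of the equivalent manual overrides (release and chart names are placeholders):

    # Equivalent manual overrides for a storage class whose volumes are
    # only writable by root; values mirror the generated flags below.
    helm upgrade --install ocf-capif ./helm/capif -n ocf-capif \
      --set mongo.securityContext.runAsUser=0 \
      --set mongo-register.securityContext.runAsUser=0 \
      --set grafana.securityContext.runAsUser=0
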
+ CAPIF_RUN_AS_USER_CONFIG="--set mongo.securityContext.runAsUser=0 + --set mongo-register.securityContext.runAsUser=0 + --set grafana.securityContext.runAsUser=0" fi \ No newline at end of file -- GitLab From 3b834de65881912a9b274b4c507b1882c36ce699 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Tue, 3 Sep 2024 14:48:55 +0200 Subject: [PATCH 15/36] Fix values on scripts --- helm/scripts/get_ingress.sh | 9 +++++++-- helm/scripts/install_capif.sh | 2 ++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/helm/scripts/get_ingress.sh b/helm/scripts/get_ingress.sh index eaed4e9..53b79c3 100755 --- a/helm/scripts/get_ingress.sh +++ b/helm/scripts/get_ingress.sh @@ -46,9 +46,14 @@ done if [[ -n "$NAMESPACE" && -n "$IP" ]] then echo "IP: $IP and namespace: $NAMESPACE" +elif [[ -n "$NAMESPACE" ]]; then + if [[ -n "$K8S_IP" ]]; then + IP=$K8S_IP + echo "Using K8S_IP found. IP: $IP and namespace: $NAMESPACE" + fi else - echo "IP ($IP) and NAMESPACE ($NAMESPACE) must be set" - exit -1 + echo "IP ($IP) and NAMESPACE ($NAMESPACE) must be set" + exit -1 fi diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index e106db8..6efabbf 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -124,6 +124,7 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mock-server.env.logLevel="DEBUG" \ --set mongo-register.persistence.storageClass=$CAPIF_STORAGE_CLASS \ --set mongo-register.persistence.storage=$CAPIF_MONGO_REGISTER_STORAGE_SIZE \ +--set mongo-register.extraFlags[0]="--repair" \ --set mongo-register-express.enabled=true \ --set mongo-register-express.ingress.enabled=true \ --set mongo-register-express.ingress.hosts[0].host="mongo-express-register-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ @@ -131,6 +132,7 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mongo-register-express.ingress.hosts[0].paths[0].pathType="Prefix" \ --set mongo.persistence.storageClass=$CAPIF_STORAGE_CLASS \ --set mongo.persistence.storage=$CAPIF_MONGO_STORAGE_SIZE \ +--set mongo.extraFlags[0]="--repair" \ --set mongo-express.enabled=true \ --set mongo-express.ingress.enabled=true \ --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ -- GitLab From 832fd0b9ace115614f1d8e797409cff1ee1531aa Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 4 Sep 2024 12:32:40 +0200 Subject: [PATCH 16/36] Add variable no enable/disable snooker deployment on monitoring --- helm/scripts/install_monitoring.sh | 1 + helm/scripts/variables.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/helm/scripts/install_monitoring.sh b/helm/scripts/install_monitoring.sh index 80b608b..9d6a00f 100755 --- a/helm/scripts/install_monitoring.sh +++ b/helm/scripts/install_monitoring.sh @@ -14,5 +14,6 @@ helm $KUBECONFIG upgrade --install -n $MONITORING_NAMESPACE $MONITORING_SERVICE_ --set prometheus.ingress.hosts[0].host=$PROMETHEUS_HOSTNAME \ --set prometheus.ingress.hosts[0].paths[0].path="/" \ --set prometheus.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set skooner.enabled=$MONITORING_SNOOKER_ENABLED \ --wait --timeout=10m --create-namespace --atomic diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 56ab49d..622fd69 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -3,7 +3,7 @@ # Use custom kubeconfig. 
If you set here the path to a kubeconfig file it will be used in installation/uninstallation scripts export CUSTOM_KUBECONFIG="" if [ -z "$CUSTOM_KUBECONFIG" ]; then - echo "The variable KUBECONFIG is empty. Using default k8s environment..." + echo "The variable CUSTOM_KUBECONFIG is empty. Using default k8s environment..." if [ -n "$KUBECONFIG" ]; then CUSTOM_KUBECONFIG="--kubeconfig $KUBECONFIG" echo "Using DEFAULT_KUBECONFIG $CUSTOM_KUBECONFIG" @@ -55,6 +55,7 @@ VAULT_JOB_NAME=vault-pki export PROMETHEUS_HOSTNAME=prometheus.testbed.develop export MONITORING_NAMESPACE=monitoring export MONITORING_SERVICE_NAME=monitoring +export MONITORING_SNOOKER_ENABLED=false # OpenCAPIF deployment variables export CAPIF_RESOURCES="NONE" -- GitLab From 68daf158ff12a62b38646e7740a8ff0fe29c6e90 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 4 Sep 2024 13:02:19 +0200 Subject: [PATCH 17/36] New set_ingress.sh script to add ingress to /etc/hosts --- helm/scripts/set_ingress.sh | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100755 helm/scripts/set_ingress.sh diff --git a/helm/scripts/set_ingress.sh b/helm/scripts/set_ingress.sh new file mode 100755 index 0000000..73ded1b --- /dev/null +++ b/helm/scripts/set_ingress.sh @@ -0,0 +1,60 @@ +#!/bin/bash +IP="" +NAMESPACE="" +source $(dirname "$(readlink -f "$0")")/variables.sh + +help() { + echo "Usage: $1 " + echo " -i : IP to use" + echo " -n : Namespace to get ingress information" + echo " -k : Kubeconfig to be used" + echo " -h : show this help" + exit 1 +} +# Read params +while getopts ":i:n:k:h" opt; do + case $opt in + i) + IP="$OPTARG" + ;; + n) + NAMESPACE="$OPTARG" + ;; + k) + KUBECONFIG="$OPTARG" + if [ -z "$KUBECONFIG" ]; then + echo "The variable KUBECONFIG is empty. Using default k8s environment..." + else + KUBECONFIG="--kubeconfig $KUBECONFIG" + echo "The variable KUBECONFIG is not empty. Its value is: $KUBECONFIG" + fi + ;; + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." >&2 + help + ;; + esac +done + +if [[ -n "$NAMESPACE" && -n "$IP" ]] +then + echo "IP: $IP and namespace: $NAMESPACE" +elif [[ -n "$NAMESPACE" ]]; then + if [[ -n "$K8S_IP" ]]; then + IP=$K8S_IP + echo "Using K8S_IP found. IP: $IP and namespace: $NAMESPACE" + fi +else + echo "IP ($IP) and NAMESPACE ($NAMESPACE) must be set" + exit -1 +fi + + +kubectl $KUBECONFIG -n $NAMESPACE get ing|grep -v NAME|awk "{print \"$IP \"\$3}" > /etc/hosts -- GitLab From 981377a81e0e55188bce0e76ff969345e7f87e3b Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 4 Sep 2024 14:05:39 +0200 Subject: [PATCH 18/36] New script to remove users by prefix --- helm/scripts/remove_remote_users_by_prefix.sh | 118 ++++++++++++++++++ tests/tasks/Users Management/users.robot | 3 + 2 files changed, 121 insertions(+) create mode 100755 helm/scripts/remove_remote_users_by_prefix.sh diff --git a/helm/scripts/remove_remote_users_by_prefix.sh b/helm/scripts/remove_remote_users_by_prefix.sh new file mode 100755 index 0000000..9f084e6 --- /dev/null +++ b/helm/scripts/remove_remote_users_by_prefix.sh @@ -0,0 +1,118 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +# User to remove +USERNAME_PREFIX= + +help() { + echo "Usage: $1 " + echo " -u : User prefix to use" + echo " -h : show this help" + exit 1 +} + +# Read params +while getopts ":u:p:t:h" opt; do + case $opt in + u) + USERNAME_PREFIX="$OPTARG" + ;; + h) + help + ;; + \?) 
+ echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." >&2 + help + ;; + esac +done + +if [[ "$USERNAME_PREFIX" == "" ]] +then + echo "USERNAME_PREFIX must be set with option -u" + help + exit -1 +fi + +# Other Stuff +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/capif/robot-tests-image +DOCKER_ROBOT_IMAGE_VERSION=1.0 + +TEST_FOLDER=$CAPIF_BASE_DIR/tests +RESULT_FOLDER=$CAPIF_BASE_DIR/results +ROBOT_DOCKER_FILE_FOLDER=$CAPIF_BASE_DIR/tools/robot + +# nginx Hostname and http port (80 by default) to reach for tests +CAPIF_REGISTER=$REGISTER_HOSTNAME +CAPIF_REGISTER_PORT=443 +CAPIF_HTTPS_PORT=443 + +# VAULT access configuration +CAPIF_VAULT=$VAULT_HOSTNAME +CAPIF_VAULT_PORT=80 +CAPIF_VAULT_TOKEN=$VAULT_ACCESS_TOKEN + +# Mock Server +MOCK_SERVER_URL=http://mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN:80 +NOTIFICATION_DESTINATION_URL=http://mock-server.$CAPIF_NAMESPACE.svc.cluster.local:9090 + +# Show variables +echo "CAPIF_HOSTNAME = $CAPIF_HOSTNAME" +echo "CAPIF_REGISTER = $CAPIF_REGISTER" +echo "CAPIF_HTTP_PORT = $CAPIF_HTTP_PORT" +echo "CAPIF_HTTPS_PORT = $CAPIF_HTTPS_PORT" +echo "CAPIF_VAULT = $CAPIF_VAULT" +echo "CAPIF_VAULT_PORT = $CAPIF_VAULT_PORT" +echo "CAPIF_VAULT_TOKEN = $CAPIF_VAULT_TOKEN" +echo "TOTAL_USERS=$TOTAL_USERS" +echo "USERNAME_PREFIX=$USERNAME_PREFIX" +echo "USER_PASSWORD=$USER_PASSWORD" +echo "MOCK_SERVER_URL=$MOCK_SERVER_URL" +echo "NOTIFICATION_DESTINATION_URL=$NOTIFICATION_DESTINATION_URL" + +docker >/dev/null 2>/dev/null +if [[ $? -ne 0 ]] +then + echo "Docker maybe is not installed. Please check if docker CLI is present." + exit -1 +fi + +docker pull $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION || echo "Docker image ($DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION) not present on repository" +docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' +if [[ $? -ne 0 ]] +then + read -p "Robot image is not present. To continue, Do you want to build it? (y/n)" build_robot_image + if [[ $build_robot_image == "y" ]] + then + echo "Building Robot docker image." + cd $ROBOT_DOCKER_FILE_FOLDER + docker build --no-cache -t $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION . 
+ cd $CAPIF_BASE_DIR + else + exit -2 + fi +fi + +cd $CAPIF_BASE_DIR + +mkdir -p $RESULT_FOLDER + +docker run -ti --rm --network="host" \ + -v $TEST_FOLDER:/opt/robot-tests/tests \ + -v $RESULT_FOLDER:/opt/robot-tests/results ${DOCKER_ROBOT_IMAGE}:${DOCKER_ROBOT_IMAGE_VERSION} \ + --variable CAPIF_HOSTNAME:$CAPIF_HOSTNAME \ + --variable CAPIF_HTTP_PORT:$CAPIF_HTTP_PORT \ + --variable CAPIF_HTTPS_PORT:$CAPIF_HTTPS_PORT \ + --variable CAPIF_REGISTER:$CAPIF_REGISTER \ + --variable CAPIF_REGISTER_PORT:$CAPIF_REGISTER_PORT \ + --variable CAPIF_VAULT:$CAPIF_VAULT \ + --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \ + --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \ + --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \ + --variable MOCK_SERVER_URL:$MOCK_SERVER_URL \ + --variable USERNAME_PREFIX:$USERNAME_PREFIX \ + --include remove-users-by-prefix diff --git a/tests/tasks/Users Management/users.robot b/tests/tasks/Users Management/users.robot index 1558f86..64991b5 100644 --- a/tests/tasks/Users Management/users.robot +++ b/tests/tasks/Users Management/users.robot @@ -33,6 +33,9 @@ Create Client Users FOR ${counter} IN RANGE ${TOTAL_USERS} ${USERNAME}= Set Variable ${USERNAME_PREFIX}_${counter} + IF ${TOTAL_USERS} == 1 + ${USERNAME}= Set Variable ${USERNAME_PREFIX} + END ${resp}= Run Keyword And Continue On Failure Create User At Register ... ${USERNAME} -- GitLab From 0912615c1287053c994abb854543ea72d099f184 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 4 Sep 2024 14:18:08 +0200 Subject: [PATCH 19/36] Fix set_ingress --- helm/scripts/set_ingress.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/scripts/set_ingress.sh b/helm/scripts/set_ingress.sh index 73ded1b..7f40347 100755 --- a/helm/scripts/set_ingress.sh +++ b/helm/scripts/set_ingress.sh @@ -57,4 +57,4 @@ else fi -kubectl $KUBECONFIG -n $NAMESPACE get ing|grep -v NAME|awk "{print \"$IP \"\$3}" > /etc/hosts +kubectl $KUBECONFIG -n $NAMESPACE get ing|grep -v NAME|awk "{print \"$IP \"\$3}" >> /etc/hosts -- GitLab From 3d193c07ce9767e78d84af94574e2b865bf6ed83 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 4 Sep 2024 15:15:53 +0200 Subject: [PATCH 20/36] Added sleep at capif_api_acl-1 --- .../capif_api_access_control_policy.robot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot b/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot index 768367e..fad1785 100644 --- a/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot +++ b/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot @@ -74,7 +74,7 @@ Retrieve ACL ... 
username=${AEF_PROVIDER_USERNAME} Check Response Variable Type And Values ${resp} 200 AccessControlPolicyList - + Sleep 30s # Check returned values Should Not Be Empty ${resp.json()['apiInvokerPolicies']} Length Should Be ${resp.json()['apiInvokerPolicies']} 1 -- GitLab From a37d3210b76ce1de13a174ba41e9449ec261057c Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 4 Sep 2024 15:58:59 +0200 Subject: [PATCH 21/36] Add force option --- helm/scripts/uninstall_capif.sh | 59 +++++++++++++++++++++------- helm/scripts/uninstall_monitoring.sh | 59 +++++++++++++++++++++------- helm/scripts/uninstall_vault.sh | 58 +++++++++++++++++++++------ 3 files changed, 135 insertions(+), 41 deletions(-) diff --git a/helm/scripts/uninstall_capif.sh b/helm/scripts/uninstall_capif.sh index 1c9ed62..9860ebc 100755 --- a/helm/scripts/uninstall_capif.sh +++ b/helm/scripts/uninstall_capif.sh @@ -1,25 +1,56 @@ #!/bin/bash source $(dirname "$(readlink -f "$0")")/variables.sh -# Function to display a warning message -warning_message() { - echo "WARNING: This uninstallation process is irreversible." - echo "All data associated with CAPIF service will be permanently lost." - echo "Are you sure you want to continue? (yes/no)" +help() { + echo "Usage: $1 " + echo " -y : Force uninstall component" + echo " -h : show this help" + exit 1 } -# Display the warning message -warning_message +export FORCE=0 +# Read params +while getopts ":yh" opt; do + case $opt in + y) + FORCE=1 + ;; + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." >&2 + help + ;; + esac +done -# Read the user input -read -r USER_INPUT +if [[ "$FORCE" == "0"]] + # Function to display a warning message + warning_message() { + echo "WARNING: This uninstallation process is irreversible." + echo "All data associated with CAPIF service will be permanently lost." + echo "Are you sure you want to continue? (yes/no)" + } -# Check if the user confirmed the uninstallation -if [ "$USER_INPUT" != "yes" ]; then - echo "Uninstallation aborted by the user." - exit 1 -fi + # Display the warning message + warning_message + + # Read the user input + read -r USER_INPUT + # Check if the user confirmed the uninstallation + if [ "$USER_INPUT" != "yes" ]; then + echo "Uninstallation aborted by the user." + exit 1 + fi +else + echo "Forced uninstall" +fi # Proceed with the uninstallation process echo "Proceeding with uninstallation..." diff --git a/helm/scripts/uninstall_monitoring.sh b/helm/scripts/uninstall_monitoring.sh index f59954a..eb131d4 100755 --- a/helm/scripts/uninstall_monitoring.sh +++ b/helm/scripts/uninstall_monitoring.sh @@ -1,25 +1,56 @@ #!/bin/bash source $(dirname "$(readlink -f "$0")")/variables.sh -# Function to display a warning message -warning_message() { - echo "WARNING: This uninstallation process is irreversible." - echo "All data associated with CAPIF service will be permanently lost." - echo "Are you sure you want to continue? (yes/no)" +help() { + echo "Usage: $1 " + echo " -y : Force uninstall component" + echo " -h : show this help" + exit 1 } -# Display the warning message -warning_message +export FORCE=0 +# Read params +while getopts ":yh" opt; do + case $opt in + y) + FORCE=1 + ;; + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." 
>&2 + help + ;; + esac +done -# Read the user input -read -r USER_INPUT +if [[ "$FORCE" == "0"]] + # Function to display a warning message + warning_message() { + echo "WARNING: This uninstallation process is irreversible." + echo "All data associated with CAPIF service will be permanently lost." + echo "Are you sure you want to continue? (yes/no)" + } -# Check if the user confirmed the uninstallation -if [ "$USER_INPUT" != "yes" ]; then - echo "Uninstallation aborted by the user." - exit 1 -fi + # Display the warning message + warning_message + + # Read the user input + read -r USER_INPUT + # Check if the user confirmed the uninstallation + if [ "$USER_INPUT" != "yes" ]; then + echo "Uninstallation aborted by the user." + exit 1 + fi +else + echo "Forced uninstall" +fi # Proceed with the uninstallation process echo "Proceeding with uninstallation..." diff --git a/helm/scripts/uninstall_vault.sh b/helm/scripts/uninstall_vault.sh index 4f7d562..af54582 100755 --- a/helm/scripts/uninstall_vault.sh +++ b/helm/scripts/uninstall_vault.sh @@ -1,23 +1,55 @@ #!/bin/bash source $(dirname "$(readlink -f "$0")")/variables.sh -# Function to display a warning message -warning_message() { - echo "WARNING: This uninstallation process is irreversible." - echo "All data associated with Vault service will be permanently lost." - echo "Are you sure you want to continue? (yes/no)" +help() { + echo "Usage: $1 " + echo " -y : Force uninstall component" + echo " -h : show this help" + exit 1 } -# Display the warning message -warning_message +export FORCE=0 +# Read params +while getopts ":yh" opt; do + case $opt in + y) + FORCE=1 + ;; + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." >&2 + help + ;; + esac +done -# Read the user input -read -r USER_INPUT +if [[ "$FORCE" == "0"]] + # Function to display a warning message + warning_message() { + echo "WARNING: This uninstallation process is irreversible." + echo "All data associated with Vault service will be permanently lost." + echo "Are you sure you want to continue? (yes/no)" + } -# Check if the user confirmed the uninstallation -if [ "$USER_INPUT" != "yes" ]; then - echo "Uninstallation aborted by the user." - exit 1 + # Display the warning message + warning_message + + # Read the user input + read -r USER_INPUT + + # Check if the user confirmed the uninstallation + if [ "$USER_INPUT" != "yes" ]; then + echo "Uninstallation aborted by the user." + exit 1 + fi +else + echo "Forced uninstall" fi # Proceed with the uninstallation process -- GitLab From 9d5c7070a9f10f56bcc3c2626709c0c09bcf73eb Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 5 Sep 2024 09:10:08 +0200 Subject: [PATCH 22/36] Fix check on uninstall scripts --- helm/scripts/uninstall_capif.sh | 2 +- helm/scripts/uninstall_monitoring.sh | 2 +- helm/scripts/uninstall_vault.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/helm/scripts/uninstall_capif.sh b/helm/scripts/uninstall_capif.sh index 9860ebc..bacd0e0 100755 --- a/helm/scripts/uninstall_capif.sh +++ b/helm/scripts/uninstall_capif.sh @@ -29,7 +29,7 @@ while getopts ":yh" opt; do esac done -if [[ "$FORCE" == "0"]] +if [ "$FORCE" == "0" ]; then # Function to display a warning message warning_message() { echo "WARNING: This uninstallation process is irreversible." 
diff --git a/helm/scripts/uninstall_monitoring.sh b/helm/scripts/uninstall_monitoring.sh index eb131d4..aff1706 100755 --- a/helm/scripts/uninstall_monitoring.sh +++ b/helm/scripts/uninstall_monitoring.sh @@ -29,7 +29,7 @@ while getopts ":yh" opt; do esac done -if [[ "$FORCE" == "0"]] +if [ "$FORCE" == "0" ]; then # Function to display a warning message warning_message() { echo "WARNING: This uninstallation process is irreversible." diff --git a/helm/scripts/uninstall_vault.sh b/helm/scripts/uninstall_vault.sh index af54582..cb9535f 100755 --- a/helm/scripts/uninstall_vault.sh +++ b/helm/scripts/uninstall_vault.sh @@ -29,7 +29,7 @@ while getopts ":yh" opt; do esac done -if [[ "$FORCE" == "0"]] +if [ "$FORCE" == "0" ]; then # Function to display a warning message warning_message() { echo "WARNING: This uninstallation process is irreversible." -- GitLab From f6edc0f7c5934549f979902d82f7986bbf76a491 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 5 Sep 2024 12:46:42 +0200 Subject: [PATCH 23/36] Minor change in variable name --- helm/scripts/install_capif.sh | 2 +- helm/scripts/variables.sh | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index 6efabbf..216d3ce 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -138,4 +138,4 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ --set mongo-express.ingress.hosts[0].paths[0].path="/" \ --set mongo-express.ingress.hosts[0].paths[0].pathType="Prefix" \ ---wait --timeout=10m --create-namespace --atomic $CAPIF_RESOURCES $CAPIF_STORAGE_ACCESS_MODE $CAPIF_RUN_AS_USER_CONFIG +--wait --timeout=10m --create-namespace --atomic $CAPIF_RESOURCES_RESERVE $CAPIF_STORAGE_ACCESS_MODE $CAPIF_RUN_AS_USER_CONFIG diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 622fd69..c1d1a04 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -58,14 +58,14 @@ export MONITORING_SERVICE_NAME=monitoring export MONITORING_SNOOKER_ENABLED=false # OpenCAPIF deployment variables -export CAPIF_RESOURCES="NONE" +export CAPIF_RESOURCES_RESERVE="YES" export CAPIF_RESOURCES_LIMITS_CPU=100m export CAPIF_RESOURCES_LIMITS_MEMORY=128Mi export CAPIF_RESOURCES_REQUESTS_CPU=100m export CAPIF_RESOURCES_REQUESTS_MEMORY=128Mi ## Storage Class export CAPIF_STORAGE_CLASS=nfs-01 -export CAPIF_STORAGE_ACCESS_MODE="ReadWriteOnce" +export CAPIF_STORAGE_ACCESS_MODE="ReadWriteMany" export CAPIF_GRAFANA_STORAGE_SIZE=10Gi export CAPIF_LOKI_STORAGE_SIZE=100Mi export CAPIF_MONGO_STORAGE_SIZE=8Gi @@ -140,12 +140,12 @@ capif_services=("fluentbit" "redis" "renderer") -if [ "$CAPIF_RESOURCES" == "NONE" ]; then +if [ "$CAPIF_RESOURCES_RESERVE" == "NO" ]; then echo "No Limits will be requested on deployment" - CAPIF_RESOURCES="" + CAPIF_RESOURCES_RESERVE="" else for service in "${capif_services[@]}"; do - CAPIF_RESOURCES="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU + CAPIF_RESOURCES_RESERVE="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU --set $service.resources.limits.memory=$CAPIF_RESOURCES_LIMITS_MEMORY --set $service.resources.requests.cpu=$CAPIF_RESOURCES_REQUESTS_CPU --set $service.resources.requests.memory=$CAPIF_RESOURCES_REQUESTS_MEMORY " -- GitLab From 7bb70277e51be10b49cd8c19376c4977e37e7c1e Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Thu, 5 Sep 
2024 13:02:49 +0200 Subject: [PATCH 24/36] Add variable to setup tempo storage size --- helm/scripts/install_capif.sh | 1 + helm/scripts/variables.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh index 216d3ce..791b976 100755 --- a/helm/scripts/install_capif.sh +++ b/helm/scripts/install_capif.sh @@ -24,6 +24,7 @@ helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART --set loki.persistence.storageClass=$CAPIF_STORAGE_CLASS \ --set loki.persistence.storage=$CAPIF_LOKI_STORAGE_SIZE \ --set tempo.tempo.metricsGenerator.remoteWriteUrl=$PROMETHEUS_URL/api/v1/write \ +--set tempo.persistence.size=$CAPIF_TEMPO_STORAGE_SIZE \ --set otelcollector.enabled=true \ --set otelcollector.configMap.tempoEndpoint=$CAPIF_NAME_VERSION_CHART-tempo:4317 \ --set ocf-access-control-policy.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-access-control-policy-api \ diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index c1d1a04..042d532 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -70,6 +70,7 @@ export CAPIF_GRAFANA_STORAGE_SIZE=10Gi export CAPIF_LOKI_STORAGE_SIZE=100Mi export CAPIF_MONGO_STORAGE_SIZE=8Gi export CAPIF_MONGO_REGISTER_STORAGE_SIZE=8Gi +export CAPIF_TEMPO_STORAGE_SIZE=3Gi ## Register and Capif hostname to be deployed export CAPIF_HOSTNAME="capif.testbed.develop" export REGISTER_HOSTNAME="register.testbed.develop" -- GitLab From dda0fb5bfdc71a5c331a0dd97bdc3da9b7ad71d4 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Fri, 6 Sep 2024 10:10:56 +0200 Subject: [PATCH 25/36] Setup smoke tests --- .../capif_api_access_control_policy.robot | 8 ++++---- .../capif_auditing_api.robot | 2 +- .../capif_api_service_discover.robot | 4 ++-- tests/features/CAPIF Api Events/capif_events_api.robot | 6 +++--- .../capif_api_invoker_managenet.robot | 2 +- .../CAPIF Api Logging Service/capif_logging_api.robot | 2 +- .../capif_api_provider_management.robot | 6 +++--- .../capif_api_publish_service.robot | 6 +++--- .../CAPIF Security Api/capif_security_api.robot | 10 +++++----- 9 files changed, 23 insertions(+), 23 deletions(-) diff --git a/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot b/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot index fad1785..05716c2 100644 --- a/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot +++ b/tests/features/CAPIF Api Access Control Policy/capif_api_access_control_policy.robot @@ -19,7 +19,7 @@ ${AEF_ID_NOT_VALID} not-valid *** Test Cases *** Retrieve ACL - [Tags] capif_api_acl-1 + [Tags] capif_api_acl-1 smoke # Register APF ${register_user_info_provider}= Provider Default Registration @@ -74,7 +74,7 @@ Retrieve ACL ... username=${AEF_PROVIDER_USERNAME} Check Response Variable Type And Values ${resp} 200 AccessControlPolicyList - Sleep 30s + Sleep 30s # Check returned values Should Not Be Empty ${resp.json()['apiInvokerPolicies']} Length Should Be ${resp.json()['apiInvokerPolicies']} 1 @@ -258,7 +258,7 @@ Retrieve ACL with security context created by two different Invokers Should Be True ${API_INVOKER_2_PRESENT}==${True} Retrieve ACL filtered by api-invoker-id - [Tags] capif_api_acl-4 + [Tags] capif_api_acl-4 smoke # Register APF ${register_user_info_provider}= Provider Default Registration @@ -526,7 +526,7 @@ Retrieve ACL with AMF Certificate ... 
cause=Certificate not authorized Retrieve ACL with Invoker Certificate - [Tags] capif_api_acl-13 + [Tags] capif_api_acl-13 smoke ${register_user_info_invoker} ... ${register_user_info_provider} ... ${service_api_description_published}= diff --git a/tests/features/CAPIF Api Auditing Service/capif_auditing_api.robot b/tests/features/CAPIF Api Auditing Service/capif_auditing_api.robot index a1f2fef..41669f9 100644 --- a/tests/features/CAPIF Api Auditing Service/capif_auditing_api.robot +++ b/tests/features/CAPIF Api Auditing Service/capif_auditing_api.robot @@ -130,7 +130,7 @@ Get a log entry withut aefid and apiInvokerId ... cause=Mandatory parameters missing Get Log Entry with apiVersion filter - [Tags] capif_api_auditing_service-4 + [Tags] capif_api_auditing_service-4 smoke # Register APF ${register_user_info}= Provider Default Registration diff --git a/tests/features/CAPIF Api Discover Service/capif_api_service_discover.robot b/tests/features/CAPIF Api Discover Service/capif_api_service_discover.robot index e780526..bbfcffd 100644 --- a/tests/features/CAPIF Api Discover Service/capif_api_service_discover.robot +++ b/tests/features/CAPIF Api Discover Service/capif_api_service_discover.robot @@ -90,7 +90,7 @@ Discover Published service APIs by not registered API Invoker ... cause=API Invoker id not found Discover Published service APIs by registered API Invoker with 1 result filtered - [Tags] capif_api_discover_service-4 + [Tags] capif_api_discover_service-4 smoke # Register APF ${register_user_info}= Provider Default Registration @@ -138,7 +138,7 @@ Discover Published service APIs by registered API Invoker with 1 result filtered List Should Contain Value ${resp.json()['serviceAPIDescriptions']} ${service_api_description_published_1} Discover Published service APIs by registered API Invoker filtered with no match - [Tags] capif_api_discover_service-5 + [Tags] capif_api_discover_service-5 smoke # Register APF ${register_user_info}= Provider Default Registration diff --git a/tests/features/CAPIF Api Events/capif_events_api.robot b/tests/features/CAPIF Api Events/capif_events_api.robot index 07ed967..09f2189 100644 --- a/tests/features/CAPIF Api Events/capif_events_api.robot +++ b/tests/features/CAPIF Api Events/capif_events_api.robot @@ -141,7 +141,7 @@ Deletes an individual CAPIF Event Subscription with invalid SubscriptionId ... cause=You are not the owner of this resource Invoker receives Service API Invocation events - [Tags] capif_api_events-6 mockserver + [Tags] capif_api_events-6 mockserver smoke # Initialize Mock server Init Mock Server @@ -410,7 +410,7 @@ Provider subscribe to API Invoker events Wait Until Keyword Succeeds 5x 5s Check Mock Server Notification Events ${events_expected} Invoker subscribed to ACL update event - [Tags] capif_api_events-10 mockserver + [Tags] capif_api_events-10 mockserver smoke # Initialize Mock server Init Mock Server @@ -568,7 +568,7 @@ Provider receives an ACL unavailable event when invoker remove Security Context. Wait Until Keyword Succeeds 5x 5s Check Mock Server Notification Events ${events_expected} Invoker receives an Invoker Authorization Revoked and ACL unavailable event when Provider revoke Invoker Authorization. 
- [Tags] capif_api_events-12 mockserver + [Tags] capif_api_events-12 mockserver smoke # Initialize Mock server Init Mock Server diff --git a/tests/features/CAPIF Api Invoker Management/capif_api_invoker_managenet.robot b/tests/features/CAPIF Api Invoker Management/capif_api_invoker_managenet.robot index 6ab8ba5..57005b5 100644 --- a/tests/features/CAPIF Api Invoker Management/capif_api_invoker_managenet.robot +++ b/tests/features/CAPIF Api Invoker Management/capif_api_invoker_managenet.robot @@ -120,7 +120,7 @@ Offboard Network App Should Be Equal As Strings ${resp.status_code} 204 Offboard Not Previously Onboarded Network App - [Tags] capif_api_invoker_management-6 + [Tags] capif_api_invoker_management-6 smoke # Default Invoker Registration and Onboarding ${register_user_info} ${url} ${request_body}= Invoker Default Onboarding diff --git a/tests/features/CAPIF Api Logging Service/capif_logging_api.robot b/tests/features/CAPIF Api Logging Service/capif_logging_api.robot index fc2bfca..5f41d2c 100644 --- a/tests/features/CAPIF Api Logging Service/capif_logging_api.robot +++ b/tests/features/CAPIF Api Logging Service/capif_logging_api.robot @@ -18,7 +18,7 @@ ${API_INVOKER_NOT_VALID} not-valid *** Test Cases *** Create a log entry - [Tags] capif_api_logging_service-1 + [Tags] capif_api_logging_service-1 smoke # Register APF ${register_user_info}= Provider Default Registration diff --git a/tests/features/CAPIF Api Provider Management/capif_api_provider_management.robot b/tests/features/CAPIF Api Provider Management/capif_api_provider_management.robot index 31aa55e..9e4d81e 100644 --- a/tests/features/CAPIF Api Provider Management/capif_api_provider_management.robot +++ b/tests/features/CAPIF Api Provider Management/capif_api_provider_management.robot @@ -80,7 +80,7 @@ Register Api Provider Already registered ... cause=Identical provider reg sec Update Registered Api Provider - [Tags] capif_api_provider_management-3 + [Tags] capif_api_provider_management-3 smoke ${register_user_info}= Provider Default Registration ${request_body}= Set Variable ${register_user_info['provider_enrollment_details']} @@ -146,14 +146,14 @@ Update Not Registered Api Provider # ... username=${AMF_PROVIDER_USERNAME} # Call Method ${CAPIF_USERS} update_capif_users_dicts ${register_user_info['resource_url'].path} ${register_user_info['amf_username']} -# + # # Check Results # Check Response Variable Type And Values ${resp} 200 APIProviderEnrolmentDetails # ... apiProvDomInfo=ROBOT_TESTING_MOD Partially Update Not Registered Api Provider - [Tags] capif_api_provider_management-6 + [Tags] capif_api_provider_management-6 smoke ${register_user_info}= Provider Default Registration ${request_body}= Create Api Provider Enrolment Details Patch Body diff --git a/tests/features/CAPIF Api Publish Service/capif_api_publish_service.robot b/tests/features/CAPIF Api Publish Service/capif_api_publish_service.robot index f3556a4..5df5c4c 100644 --- a/tests/features/CAPIF Api Publish Service/capif_api_publish_service.robot +++ b/tests/features/CAPIF Api Publish Service/capif_api_publish_service.robot @@ -16,7 +16,7 @@ ${SERVICE_API_ID_NOT_VALID} not-valid *** Test Cases *** Publish API by Authorised API Publisher - [Tags] capif_api_publish_service-1 + [Tags] capif_api_publish_service-1 smoke # Register APF ${register_user_info}= Provider Default Registration @@ -54,7 +54,7 @@ Publish API by NON Authorised API Publisher ... 
cause=Publisher id not found Retrieve all APIs Published by Authorised apfId - [Tags] capif_api_publish_service-3 + [Tags] capif_api_publish_service-3 smoke # Register APF ${register_user_info}= Provider Default Registration @@ -262,7 +262,7 @@ Update APIs Published by NON Authorised apfId ... apiName=service_1 Delete API Published by Authorised apfId with valid serviceApiId - [Tags] capif_api_publish_service-11 + [Tags] capif_api_publish_service-11 smoke # Register APF ${register_user_info}= Provider Default Registration diff --git a/tests/features/CAPIF Security Api/capif_security_api.robot b/tests/features/CAPIF Security Api/capif_security_api.robot index e50d16a..6170e75 100644 --- a/tests/features/CAPIF Security Api/capif_security_api.robot +++ b/tests/features/CAPIF Security Api/capif_security_api.robot @@ -101,7 +101,7 @@ Create a security context for an API invoker with Invalid apiInvokerID ... cause=API Invoker not exists or invalid ID Retrieve the Security Context of an API Invoker - [Tags] capif_security_api-5 + [Tags] capif_security_api-5 smoke # Default Invoker Registration and Onboarding ${register_user_info_invoker} ${url} ${request_body}= Invoker Default Onboarding @@ -186,7 +186,7 @@ Retrieve the Security Context of an API Invoker with invalid apfId ... cause=User role must be aef Delete the Security Context of an API Invoker - [Tags] capif_security_api-8 + [Tags] capif_security_api-8 smoke # Default Invoker Registration and Onboarding ${register_user_info_invoker} ${url} ${request_body}= Invoker Default Onboarding @@ -290,7 +290,7 @@ Delete the Security Context of an API Invoker with invalid apiInvokerID ... cause=API Invoker not exists or invalid ID Update the Security Context of an API Invoker - [Tags] capif_security_api-12 + [Tags] capif_security_api-12 smoke # Default Invoker Registration and Onboarding ${register_user_info_invoker} ${url} ${request_body}= Invoker Default Onboarding @@ -408,7 +408,7 @@ Update the Security Context of an API Invoker with invalid apiInvokerID ... 
cause=API Invoker not exists or invalid ID Revoke the authorization of the API invoker for APIs - [Tags] capif_security_api-16 + [Tags] capif_security_api-16 smoke # Register APF ${register_user_info_provider}= Provider Default Registration @@ -569,7 +569,7 @@ Revoke the authorization of the API invoker for APIs with invalid apiInvokerId Dictionaries Should Be Equal ${resp.json()} ${security_context} Retrieve access token - [Tags] capif_security_api-19 + [Tags] capif_security_api-19 smoke # Register APF ${register_user_info_provider}= Provider Default Registration -- GitLab From 9ea02cd7fb1a10124d93cc51c994243e6c1f9eb5 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 11 Sep 2024 13:19:20 +0200 Subject: [PATCH 26/36] Detect if scripts are running on macos or not, to use gsed ot sed on them --- helm/scripts/install_vault.sh | 12 ++++++------ helm/scripts/variables.sh | 11 +++++++++++ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/helm/scripts/install_vault.sh b/helm/scripts/install_vault.sh index 03d2e7c..971d520 100755 --- a/helm/scripts/install_vault.sh +++ b/helm/scripts/install_vault.sh @@ -68,7 +68,7 @@ echo "Init vault" kubectl $KUBECONFIG exec -ti vault-0 -n $VAULT_NAMESPACE -- vault operator init -key-shares=1 -key-threshold=1 > $VAULT_FILE # Remove control characters -cat $VAULT_FILE | sed -r 's/\x1B\[[0-9;]*[JKmsu]//g' | sed -e 's/[^[:print:]\t\n]//g' > $VAULT_FILE.tmp +cat $VAULT_FILE | ${SED_CMD} -r 's/\x1B\[[0-9;]*[JKmsu]//g' | ${SED_CMD} -e 's/[^[:print:]\t\n]//g' > $VAULT_FILE.tmp mv $VAULT_FILE.tmp $VAULT_FILE # get UNSEAL Key and TOKEN @@ -99,11 +99,11 @@ while true; do fi done -sed -i "s/namespace:.*/namespace: $VAULT_NAMESPACE/g" $HELM_DIR/vault-job/vault-job.yaml -sed -i "s/VAULT_TOKEN=.*/VAULT_TOKEN=$VAULT_TOKEN/g" $HELM_DIR/vault-job/vault-job.yaml -sed -i "s/DOMAIN1=.*/DOMAIN1=$DOMAIN1/g" $HELM_DIR/vault-job/vault-job.yaml -sed -i "s/DOMAIN2=.*/DOMAIN2=$DOMAIN2/g" $HELM_DIR/vault-job/vault-job.yaml -sed -i "s/DOMAIN3=.*/DOMAIN3=$DOMAIN3/g" $HELM_DIR/vault-job/vault-job.yaml +${SED_CMD} -i "s/namespace:.*/namespace: $VAULT_NAMESPACE/g" $HELM_DIR/vault-job/vault-job.yaml +${SED_CMD} -i "s/VAULT_TOKEN=.*/VAULT_TOKEN=$VAULT_TOKEN/g" $HELM_DIR/vault-job/vault-job.yaml +${SED_CMD} -i "s/DOMAIN1=.*/DOMAIN1=$DOMAIN1/g" $HELM_DIR/vault-job/vault-job.yaml +${SED_CMD} -i "s/DOMAIN2=.*/DOMAIN2=$DOMAIN2/g" $HELM_DIR/vault-job/vault-job.yaml +${SED_CMD} -i "s/DOMAIN3=.*/DOMAIN3=$DOMAIN3/g" $HELM_DIR/vault-job/vault-job.yaml kubectl $KUBECONFIG delete job $VAULT_JOB_NAME -n $VAULT_NAMESPACE || echo "No vault job present" kubectl $KUBECONFIG -n $VAULT_NAMESPACE apply -f $HELM_DIR/vault-job/ diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 042d532..5283566 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -167,4 +167,15 @@ if [ "$CAPIF_STORAGE_CLASS" == "longhorn" ]; then CAPIF_RUN_AS_USER_CONFIG="--set mongo.securityContext.runAsUser=0 --set mongo-register.securityContext.runAsUser=0 --set grafana.securityContext.runAsUser=0" +fi + +export SED_CMD=sed +if [[ "$OSTYPE" == "darwin"* ]]; then + # Require gnu-sed. + if ! [ -x "$(command -v gsed)" ]; then + echo "Error: 'gsed' is not istalled." >&2 + echo "If you are using Homebrew, install with 'brew install gnu-sed'." 
>&2 + exit 1 + fi + SED_CMD=gsed fi \ No newline at end of file -- GitLab From 95a53fa947f21eb2b33a2ae9d2cc38c4c1ee2ef1 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Wed, 11 Sep 2024 13:42:58 +0200 Subject: [PATCH 27/36] Setup default limit for all helm charts deployed --- helm/capif/charts/fluentbit/values.yaml | 8 ++++---- helm/capif/charts/grafana/values.yaml | 8 ++++---- helm/capif/charts/loki/values.yaml | 8 ++++---- helm/capif/charts/mock-server/values.yaml | 8 ++++---- helm/capif/charts/mongo-express/values.yaml | 8 ++++---- helm/capif/charts/mongo-register-express/values.yaml | 8 ++++---- helm/capif/charts/mongo-register/values.yaml | 8 ++++---- helm/capif/charts/mongo/values.yaml | 8 ++++---- helm/capif/charts/nginx/values.yaml | 8 ++++---- helm/capif/charts/ocf-access-control-policy/values.yaml | 8 ++++---- helm/capif/charts/ocf-api-invocation-logs/values.yaml | 8 ++++---- helm/capif/charts/ocf-api-invoker-management/values.yaml | 8 ++++---- helm/capif/charts/ocf-api-provider-management/values.yaml | 8 ++++---- helm/capif/charts/ocf-auditing-api-logs/values.yaml | 8 ++++---- helm/capif/charts/ocf-discover-service-api/values.yaml | 8 ++++---- helm/capif/charts/ocf-events/values.yaml | 8 ++++---- helm/capif/charts/ocf-helper/values.yaml | 8 ++++---- helm/capif/charts/ocf-publish-service-api/values.yaml | 8 ++++---- helm/capif/charts/ocf-register/values.yaml | 8 ++++---- helm/capif/charts/ocf-routing-info/values.yaml | 8 ++++---- helm/capif/charts/ocf-security/values.yaml | 8 ++++---- helm/capif/charts/otelcollector/values.yaml | 8 ++++---- helm/capif/charts/redis/values.yaml | 8 ++++---- helm/capif/charts/renderer/values.yaml | 8 ++++---- helm/monitoring-stack/charts/grafana/values.yaml | 8 ++++---- helm/monitoring-stack/charts/prometheus/values.yaml | 8 ++++---- helm/monitoring-stack/charts/skooner/values.yaml | 8 ++++---- 27 files changed, 108 insertions(+), 108 deletions(-) diff --git a/helm/capif/charts/fluentbit/values.yaml b/helm/capif/charts/fluentbit/values.yaml index 49aa7e7..f8d35e6 100644 --- a/helm/capif/charts/fluentbit/values.yaml +++ b/helm/capif/charts/fluentbit/values.yaml @@ -64,14 +64,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index f081cc7..d9eb4a0 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -75,14 +75,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index f3c1c07..c90a654 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -68,14 +68,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/mock-server/values.yaml b/helm/capif/charts/mock-server/values.yaml index a34433a..e5506eb 100644 --- a/helm/capif/charts/mock-server/values.yaml +++ b/helm/capif/charts/mock-server/values.yaml @@ -64,14 +64,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index 71ad108..45147b6 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ b/helm/capif/charts/mongo-express/values.yaml @@ -66,14 +66,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index d9d26e9..9f21355 100644 --- a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -67,14 +67,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index 9b2da60..7bf2693 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -63,14 +63,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index 5569bc4..db4b443 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -63,14 +63,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index 8d5ac3f..98ef4c2 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -74,14 +74,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index 472e509..e95a5da 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -64,14 +64,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index e9b970f..bbed22b 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -69,14 +69,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index c6b0c09..5971f07 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -71,14 +71,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 2160a02..196b9dc 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -71,14 +71,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index 2596b4c..b1323a4 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -65,14 +65,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml b/helm/capif/charts/ocf-discover-service-api/values.yaml index 8581434..485b02a 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -65,14 +65,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index 64e075e..2c96542 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -65,14 +65,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-helper/values.yaml b/helm/capif/charts/ocf-helper/values.yaml index f9e35bd..272c32c 100644 --- a/helm/capif/charts/ocf-helper/values.yaml +++ b/helm/capif/charts/ocf-helper/values.yaml @@ -72,14 +72,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index 2f8d3a9..e93b53e 100644 --- a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -65,14 +65,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index e26b36d..e24cf75 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -76,14 +76,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index c409c93..1c40a17 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -63,14 +63,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index aa0aa7a..dcbe5d4 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -69,14 +69,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/otelcollector/values.yaml b/helm/capif/charts/otelcollector/values.yaml index cd541af..363c18b 100644 --- a/helm/capif/charts/otelcollector/values.yaml +++ b/helm/capif/charts/otelcollector/values.yaml @@ -69,14 +69,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index bc1897d..e85e73c 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -62,14 +62,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/capif/charts/renderer/values.yaml b/helm/capif/charts/renderer/values.yaml index 7696150..16f3c15 100644 --- a/helm/capif/charts/renderer/values.yaml +++ b/helm/capif/charts/renderer/values.yaml @@ -64,14 +64,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml index 5b519b9..bd6d0ed 100644 --- a/helm/monitoring-stack/charts/grafana/values.yaml +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -70,14 +70,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index 5539d1c..408d35b 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -68,14 +68,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi diff --git a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml index ad511bf..012054f 100644 --- a/helm/monitoring-stack/charts/skooner/values.yaml +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -58,14 +58,14 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi + limits: + cpu: 100m + memory: 128Mi # requests: # cpu: 100m # memory: 128Mi -- GitLab From d71dc9da65dbcb4d9f0feeafb7133aa5c3778004 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Mon, 16 Sep 2024 13:18:37 +0200 Subject: [PATCH 28/36] Setup requests resources with low values, and limits to avoid pods to get all resources if it's under stress --- helm/capif/charts/fluentbit/values.yaml | 6 +++--- helm/capif/charts/grafana/values.yaml | 6 +++--- helm/capif/charts/loki/values.yaml | 6 +++--- helm/capif/charts/mock-server/values.yaml | 6 +++--- helm/capif/charts/mongo-express/values.yaml | 6 +++--- helm/capif/charts/mongo-register-express/values.yaml | 6 +++--- helm/capif/charts/mongo-register/values.yaml | 6 +++--- helm/capif/charts/mongo/values.yaml | 6 +++--- helm/capif/charts/nginx/values.yaml | 6 +++--- helm/capif/charts/ocf-access-control-policy/values.yaml | 6 +++--- helm/capif/charts/ocf-api-invocation-logs/values.yaml | 6 +++--- helm/capif/charts/ocf-api-invoker-management/values.yaml | 6 +++--- .../capif/charts/ocf-api-provider-management/values.yaml | 6 +++--- helm/capif/charts/ocf-auditing-api-logs/values.yaml | 6 +++--- helm/capif/charts/ocf-discover-service-api/values.yaml | 6 +++--- helm/capif/charts/ocf-events/values.yaml | 6 +++--- helm/capif/charts/ocf-helper/values.yaml | 6 +++--- helm/capif/charts/ocf-publish-service-api/values.yaml | 6 +++--- helm/capif/charts/ocf-register/values.yaml | 6 +++--- helm/capif/charts/ocf-routing-info/values.yaml | 6 +++--- helm/capif/charts/ocf-security/values.yaml | 6 +++--- helm/capif/charts/otelcollector/values.yaml | 6 +++--- helm/capif/charts/redis/values.yaml | 6 +++--- helm/capif/charts/renderer/values.yaml | 6 +++--- helm/monitoring-stack/charts/grafana/values.yaml | 6 +++--- helm/monitoring-stack/charts/prometheus/values.yaml | 6 +++--- helm/monitoring-stack/charts/skooner/values.yaml | 6 +++--- services/run_capif_tests.sh | 9 ++++++++- 28 files changed, 89 insertions(+), 82 deletions(-) diff --git a/helm/capif/charts/fluentbit/values.yaml b/helm/capif/charts/fluentbit/values.yaml index f8d35e6..3b800e4 100644 --- a/helm/capif/charts/fluentbit/values.yaml +++ b/helm/capif/charts/fluentbit/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index d9eb4a0..f65f99a 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -83,9 +83,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 
128Mi + requests: + cpu: 10m + memory: 16Mi autoscaling: enabled: false diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index c90a654..88a6fac 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -76,9 +76,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mock-server/values.yaml b/helm/capif/charts/mock-server/values.yaml index e5506eb..8da58e8 100644 --- a/helm/capif/charts/mock-server/values.yaml +++ b/helm/capif/charts/mock-server/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index 45147b6..aeb889c 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ b/helm/capif/charts/mongo-express/values.yaml @@ -74,9 +74,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index 9f21355..1000cef 100644 --- a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -75,9 +75,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index 7bf2693..9b7577b 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -71,9 +71,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index db4b443..30f49ac 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -71,9 +71,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index 98ef4c2..8a2f355 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -82,9 +82,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index e95a5da..6c1eebe 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index bbed22b..e4e1fc5 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ 
b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -77,9 +77,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index 5971f07..77880bc 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -79,9 +79,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 196b9dc..82c8ab4 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -79,9 +79,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index b1323a4..92fb75d 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml b/helm/capif/charts/ocf-discover-service-api/values.yaml index 485b02a..cd7c08f 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index 2c96542..2a2477c 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-helper/values.yaml b/helm/capif/charts/ocf-helper/values.yaml index 272c32c..7c6e9c9 100644 --- a/helm/capif/charts/ocf-helper/values.yaml +++ b/helm/capif/charts/ocf-helper/values.yaml @@ -80,9 +80,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index e93b53e..6218bb2 100644 --- a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index e24cf75..9fa534f 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -84,9 +84,9 @@ resources: limits: cpu: 100m memory: 128Mi - # 
requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index 1c40a17..8084921 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -71,9 +71,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index dcbe5d4..20de7fc 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -77,9 +77,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/otelcollector/values.yaml b/helm/capif/charts/otelcollector/values.yaml index 363c18b..756463c 100644 --- a/helm/capif/charts/otelcollector/values.yaml +++ b/helm/capif/charts/otelcollector/values.yaml @@ -77,9 +77,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index e85e73c..2711b1a 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -70,9 +70,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/renderer/values.yaml b/helm/capif/charts/renderer/values.yaml index 16f3c15..d84c5b1 100644 --- a/helm/capif/charts/renderer/values.yaml +++ b/helm/capif/charts/renderer/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi livenessProbe: httpGet: diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml index bd6d0ed..98777d4 100644 --- a/helm/monitoring-stack/charts/grafana/values.yaml +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -78,9 +78,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index 408d35b..202c41e 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -76,9 +76,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml index 012054f..fe609aa 100644 --- a/helm/monitoring-stack/charts/skooner/values.yaml +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -66,9 +66,9 @@ resources: limits: cpu: 100m memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + requests: + cpu: 10m + memory: 16Mi autoscaling: enabled: false diff --git a/services/run_capif_tests.sh b/services/run_capif_tests.sh index 5f1a2b2..b4a9e13 100755 --- a/services/run_capif_tests.sh +++ b/services/run_capif_tests.sh @@ -34,6 +34,13 
@@ echo "CAPIF_VAULT_PORT = $CAPIF_VAULT_PORT" echo "CAPIF_VAULT_TOKEN = $CAPIF_VAULT_TOKEN" echo "MOCK_SERVER_URL = $MOCK_SERVER_URL" +INPUT_OPTIONS=$@ +# Check if input is provided +if [ -z "$1" ]; then + # Set default value if no input is provided + INPUT_OPTIONS="--include all" +fi + docker >/dev/null 2>/dev/null if [[ $? -ne 0 ]] then @@ -75,4 +82,4 @@ docker run -ti --rm --network="host" \ --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \ --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \ --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \ - --variable MOCK_SERVER_URL:$MOCK_SERVER_URL $@ + --variable MOCK_SERVER_URL:$MOCK_SERVER_URL $INPUT_OPTIONS -- GitLab From 0c23d4e451521d116c7343fd348383adc175602f Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Mon, 16 Sep 2024 16:13:28 +0200 Subject: [PATCH 29/36] Setup empty requests and limits on resources --- helm/scripts/variables.sh | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 5283566..0c3b70e 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -141,18 +141,6 @@ capif_services=("fluentbit" "redis" "renderer") -if [ "$CAPIF_RESOURCES_RESERVE" == "NO" ]; then - echo "No Limits will be requested on deployment" - CAPIF_RESOURCES_RESERVE="" -else - for service in "${capif_services[@]}"; do - CAPIF_RESOURCES_RESERVE="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU - --set $service.resources.limits.memory=$CAPIF_RESOURCES_LIMITS_MEMORY - --set $service.resources.requests.cpu=$CAPIF_RESOURCES_REQUESTS_CPU - --set $service.resources.requests.memory=$CAPIF_RESOURCES_REQUESTS_MEMORY " - done -fi - if [ -n "$CAPIF_STORAGE_ACCESS_MODE" ]; then CAPIF_STORAGE_ACCESS_MODE="--set mongo.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE --set mongo-register.persistence.accessModes[0]=$CAPIF_STORAGE_ACCESS_MODE @@ -178,4 +166,21 @@ if [[ "$OSTYPE" == "darwin"* ]]; then exit 1 fi SED_CMD=gsed +fi + +if [ "$CAPIF_RESOURCES_RESERVE" == "NO" ]; then + echo "No Limits will be requested on deployment" + CAPIF_RESOURCES_RESERVE="" + ${SED_CMD} -i "s/^resources:.*/resources: {}/g" **/**/values.yaml + ${SED_CMD} -i "s/^ limits:/# limits:/g" **/**/values.yaml + ${SED_CMD} -i "s/^ cpu:/# cpu:/g" **/**/values.yaml + ${SED_CMD} -i "s/^ memory:/# memory:/g" **/**/values.yaml + ${SED_CMD} -i "s/^ requests:/# requests:/g" **/**/values.yaml +else + for service in "${capif_services[@]}"; do + CAPIF_RESOURCES_RESERVE="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU + --set $service.resources.limits.memory=$CAPIF_RESOURCES_LIMITS_MEMORY + --set $service.resources.requests.cpu=$CAPIF_RESOURCES_REQUESTS_CPU + --set $service.resources.requests.memory=$CAPIF_RESOURCES_REQUESTS_MEMORY " + done fi \ No newline at end of file -- GitLab From 61515f6354e1de570e72a115e3f0aef39ba63bac Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Mon, 16 Sep 2024 16:27:33 +0200 Subject: [PATCH 30/36] Setup properly resources in helm. By default limits and requests set to 100m and 128Mi. 
--- helm/capif/charts/fluentbit/values.yaml | 4 ++-- helm/capif/charts/grafana/values.yaml | 4 ++-- helm/capif/charts/loki/values.yaml | 4 ++-- helm/capif/charts/mock-server/values.yaml | 4 ++-- helm/capif/charts/mongo-express/values.yaml | 4 ++-- helm/capif/charts/mongo-register-express/values.yaml | 4 ++-- helm/capif/charts/mongo-register/values.yaml | 4 ++-- helm/capif/charts/mongo/values.yaml | 4 ++-- helm/capif/charts/nginx/values.yaml | 4 ++-- .../capif/charts/ocf-access-control-policy/values.yaml | 4 ++-- helm/capif/charts/ocf-api-invocation-logs/values.yaml | 4 ++-- .../charts/ocf-api-invoker-management/values.yaml | 4 ++-- .../charts/ocf-api-provider-management/values.yaml | 4 ++-- helm/capif/charts/ocf-auditing-api-logs/values.yaml | 4 ++-- helm/capif/charts/ocf-discover-service-api/values.yaml | 4 ++-- helm/capif/charts/ocf-events/values.yaml | 4 ++-- helm/capif/charts/ocf-helper/values.yaml | 4 ++-- helm/capif/charts/ocf-publish-service-api/values.yaml | 4 ++-- helm/capif/charts/ocf-register/values.yaml | 4 ++-- helm/capif/charts/ocf-routing-info/values.yaml | 4 ++-- helm/capif/charts/ocf-security/values.yaml | 4 ++-- helm/capif/charts/otelcollector/values.yaml | 4 ++-- helm/capif/charts/redis/values.yaml | 4 ++-- helm/capif/charts/renderer/values.yaml | 4 ++-- helm/monitoring-stack/charts/grafana/values.yaml | 4 ++-- helm/monitoring-stack/charts/prometheus/values.yaml | 4 ++-- helm/monitoring-stack/charts/skooner/values.yaml | 4 ++-- helm/scripts/variables.sh | 10 +++++----- 28 files changed, 59 insertions(+), 59 deletions(-) diff --git a/helm/capif/charts/fluentbit/values.yaml b/helm/capif/charts/fluentbit/values.yaml index 3b800e4..f681d04 100644 --- a/helm/capif/charts/fluentbit/values.yaml +++ b/helm/capif/charts/fluentbit/values.yaml @@ -73,8 +73,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index f65f99a..9c22900 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -84,8 +84,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi autoscaling: enabled: false diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index 88a6fac..d546c1f 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -77,8 +77,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mock-server/values.yaml b/helm/capif/charts/mock-server/values.yaml index 8da58e8..9fdfd94 100644 --- a/helm/capif/charts/mock-server/values.yaml +++ b/helm/capif/charts/mock-server/values.yaml @@ -73,8 +73,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index aeb889c..36f3a8b 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ b/helm/capif/charts/mongo-express/values.yaml @@ -75,8 +75,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index 1000cef..dd225f5 100644 --- 
a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -76,8 +76,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index 9b7577b..7d03654 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -72,8 +72,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index 30f49ac..b12b560 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -72,8 +72,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index 8a2f355..db0541e 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -83,8 +83,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index 6c1eebe..9184d26 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -73,8 +73,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index e4e1fc5..dc63d4b 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -78,8 +78,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index 77880bc..e832c7d 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -80,8 +80,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 82c8ab4..547bb05 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -80,8 +80,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index 92fb75d..859ba12 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -74,8 +74,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml 
b/helm/capif/charts/ocf-discover-service-api/values.yaml index cd7c08f..6aa8e61 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -74,8 +74,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index 2a2477c..b3ca6b0 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -74,8 +74,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-helper/values.yaml b/helm/capif/charts/ocf-helper/values.yaml index 7c6e9c9..6062246 100644 --- a/helm/capif/charts/ocf-helper/values.yaml +++ b/helm/capif/charts/ocf-helper/values.yaml @@ -81,8 +81,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index 6218bb2..ac32a98 100644 --- a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -74,8 +74,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index 9fa534f..ffa5d50 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -85,8 +85,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index 8084921..d6c6a3d 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -72,8 +72,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index 20de7fc..2be4288 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -78,8 +78,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/otelcollector/values.yaml b/helm/capif/charts/otelcollector/values.yaml index 756463c..7b76a17 100644 --- a/helm/capif/charts/otelcollector/values.yaml +++ b/helm/capif/charts/otelcollector/values.yaml @@ -78,8 +78,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index 2711b1a..4011e97 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -71,8 +71,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/renderer/values.yaml b/helm/capif/charts/renderer/values.yaml index d84c5b1..23237f3 100644 --- a/helm/capif/charts/renderer/values.yaml +++ 
b/helm/capif/charts/renderer/values.yaml @@ -73,8 +73,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi livenessProbe: httpGet: diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml index 98777d4..f5f2e37 100644 --- a/helm/monitoring-stack/charts/grafana/values.yaml +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -79,8 +79,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index 202c41e..f4b6047 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -77,8 +77,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml index fe609aa..9e84807 100644 --- a/helm/monitoring-stack/charts/skooner/values.yaml +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -67,8 +67,8 @@ resources: cpu: 100m memory: 128Mi requests: - cpu: 10m - memory: 16Mi + cpu: 100m + memory: 128Mi autoscaling: enabled: false diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh index 0c3b70e..0f5f030 100755 --- a/helm/scripts/variables.sh +++ b/helm/scripts/variables.sh @@ -171,11 +171,11 @@ fi if [ "$CAPIF_RESOURCES_RESERVE" == "NO" ]; then echo "No Limits will be requested on deployment" CAPIF_RESOURCES_RESERVE="" - ${SED_CMD} -i "s/^resources:.*/resources: {}/g" **/**/values.yaml - ${SED_CMD} -i "s/^ limits:/# limits:/g" **/**/values.yaml - ${SED_CMD} -i "s/^ cpu:/# cpu:/g" **/**/values.yaml - ${SED_CMD} -i "s/^ memory:/# memory:/g" **/**/values.yaml - ${SED_CMD} -i "s/^ requests:/# requests:/g" **/**/values.yaml + ${SED_CMD} -i "s/^resources:.*/resources: {}/g" $HELM_DIR/**/**/**/values.yaml + ${SED_CMD} -i "s/^ limits:/# limits:/g" $HELM_DIR/**/**/**/values.yaml + ${SED_CMD} -i "s/^ cpu:/# cpu:/g" $HELM_DIR/**/**/**/values.yaml + ${SED_CMD} -i "s/^ memory:/# memory:/g" $HELM_DIR/**/**/**/values.yaml + ${SED_CMD} -i "s/^ requests:/# requests:/g" $HELM_DIR/**/**/**/values.yaml else for service in "${capif_services[@]}"; do CAPIF_RESOURCES_RESERVE="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU -- GitLab From 0206fc18a095482d118cc325c26a6f3a955dcb80 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Mon, 16 Sep 2024 17:02:36 +0200 Subject: [PATCH 31/36] Setup non relative paths in local scripts --- services/clean_capif_docker_services.sh | 16 ++++++++++------ services/clean_mock_server.sh | 5 ++++- services/monitoring/docker-compose.yml | 1 - services/run.sh | 16 ++++++++++------ services/run_mock_server.sh | 8 +++++++- services/show_logs.sh | 21 ++++++++++++++------- 6 files changed, 45 insertions(+), 22 deletions(-) diff --git a/services/clean_capif_docker_services.sh b/services/clean_capif_docker_services.sh index bd6ef11..dec71b8 100755 --- a/services/clean_capif_docker_services.sh +++ b/services/clean_capif_docker_services.sh @@ -1,5 +1,9 @@ #!/bin/bash +# Directories variables setup (no modification needed) +export SERVICES_DIR=$(dirname "$(readlink -f "$0")") +export CAPIF_BASE_DIR=$(dirname "$SERVICES_DIR") + help() { echo "Usage: $1 " echo " -c : Clean capif services" @@ -26,27 +30,27 @@ while 
getopts "cvrahms" opt; do case $opt in c) echo "Remove Capif services" - FILES+=("docker-compose-capif.yml") + FILES+=("$SERVICES_DIR/docker-compose-capif.yml") ;; v) echo "Remove vault service" - FILES+=("docker-compose-vault.yml") + FILES+=("$SERVICES_DIR/docker-compose-vault.yml") ;; r) echo "Remove register service" - FILES+=("docker-compose-register.yml") + FILES+=("$SERVICES_DIR/docker-compose-register.yml") ;; m) echo "Remove monitoring service" - FILES+=("../monitoring/docker-compose.yml") + FILES+=("$SERVICES_DIR/monitoring/docker-compose.yml") ;; s) echo "Robot Mock Server" - FILES+=("docker-compose-mock-server.yml") + FILES+=("$SERVICES_DIR/docker-compose-mock-server.yml") ;; a) echo "Remove all services" - FILES=("docker-compose-capif.yml" "docker-compose-vault.yml" "docker-compose-register.yml" "docker-compose-mock-server.yml" "../monitoring/docker-compose.yml") + FILES=("$SERVICES_DIR/docker-compose-capif.yml" "$SERVICES_DIR/docker-compose-vault.yml" "$SERVICES_DIR/docker-compose-register.yml" "$SERVICES_DIR/docker-compose-mock-server.yml" "$SERVICES_DIR//monitoring/docker-compose.yml") ;; h) help diff --git a/services/clean_mock_server.sh b/services/clean_mock_server.sh index 5ea886c..157e39a 100755 --- a/services/clean_mock_server.sh +++ b/services/clean_mock_server.sh @@ -1,6 +1,9 @@ #!/bin/bash -FILE="docker-compose-mock-server.yml" +# Directories variables setup (no modification needed) +export SERVICES_DIR=$(dirname "$(readlink -f "$0")") + +FILE="$SERVICES_DIR/docker-compose-mock-server.yml" echo "Executing 'docker compose down' for file $FILE" docker compose -f "$FILE" down --rmi all diff --git a/services/monitoring/docker-compose.yml b/services/monitoring/docker-compose.yml index 41f647f..e71dff5 100644 --- a/services/monitoring/docker-compose.yml +++ b/services/monitoring/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3' services: prometheus: image: prom/prometheus:latest diff --git a/services/run.sh b/services/run.sh index fa10d8a..c77083e 100755 --- a/services/run.sh +++ b/services/run.sh @@ -1,10 +1,14 @@ #!/bin/bash +# Directories variables setup (no modification needed) +export SERVICES_DIR=$(dirname "$(readlink -f "$0")") +export CAPIF_BASE_DIR=$(dirname "$SERVICES_DIR") + help() { echo "Usage: $1 " echo " -c : Setup different hostname for capif" echo " -s : Run Mock server" - echo " -m : Clean monitoring service" + echo " -m : Run monitoring service" echo " -l : Set Log Level (default DEBUG). Select one of: [CRITICAL, FATAL, ERROR, WARNING, WARN, INFO, DEBUG, NOTSET]" echo " -h : show this help" exit 1 @@ -69,7 +73,7 @@ if [ "$MONITORING_STATE" == "true" ] ; then echo '***Monitoring set as true***' echo '***Creating Monitoring stack***' - DUID=$DUID DGID=$DGID docker compose -f "./monitoring/docker-compose.yml" up --detach + DUID=$DUID DGID=$DGID docker compose -f "$SERVICES_DIR/monitoring/docker-compose.yml" up --detach status=$? if [ $status -eq 0 ]; then echo "*** Monitoring Stack Runing ***" @@ -81,7 +85,7 @@ fi docker network create capif-network -docker compose -f "docker-compose-vault.yml" up --detach --build +docker compose -f "$SERVICES_DIR/docker-compose-vault.yml" up --detach --build status=$? 
if [ $status -eq 0 ]; then @@ -91,7 +95,7 @@ else exit $status fi -CAPIF_HOSTNAME=$HOSTNAME MONITORING=$MONITORING_STATE LOG_LEVEL=$LOG_LEVEL docker compose -f "docker-compose-capif.yml" up --detach --build +CAPIF_HOSTNAME=$HOSTNAME MONITORING=$MONITORING_STATE LOG_LEVEL=$LOG_LEVEL docker compose -f "$SERVICES_DIR/docker-compose-capif.yml" up --detach --build status=$? if [ $status -eq 0 ]; then @@ -103,7 +107,7 @@ fi CAPIF_PRIV_KEY_BASE_64=$(echo "$(cat nginx/certs/server.key)") -CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 LOG_LEVEL=$LOG_LEVEL docker compose -f "docker-compose-register.yml" up --detach --build +CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 LOG_LEVEL=$LOG_LEVEL docker compose -f "$SERVICES_DIR/docker-compose-register.yml" up --detach --build status=$? if [ $status -eq 0 ]; then @@ -117,7 +121,7 @@ if [ "$ROBOT_MOCK_SERVER" == "true" ] ; then echo '***Robot Mock Server set as true***' echo '***Creating Robot Mock Server stack***' - IP=$IP PORT=$PORT docker compose -f "docker-compose-mock-server.yml" up --detach + IP=$IP PORT=$PORT docker compose -f "$SERVICES_DIR/docker-compose-mock-server.yml" up --detach status=$? if [ $status -eq 0 ]; then echo "*** Monitoring Stack Runing ***" diff --git a/services/run_mock_server.sh b/services/run_mock_server.sh index 5a194c4..f0ca4e3 100755 --- a/services/run_mock_server.sh +++ b/services/run_mock_server.sh @@ -1,5 +1,9 @@ #!/bin/bash +# Directories variables setup (no modification needed) +export SERVICES_DIR=$(dirname "$(readlink -f "$0")") +export CAPIF_BASE_DIR=$(dirname "$SERVICES_DIR") + help() { echo "Usage: $1 " echo " -i : Setup different host ip for mock server (default 0.0.0.0)" @@ -36,7 +40,9 @@ done echo Robot Framework Mock Server will listen on $IP:$PORT -IP=$IP PORT=$PORT docker compose -f "docker-compose-mock-server.yml" up --detach --build +docker network create capif-network || echo "capif-network previously created on docker networks" + +IP=$IP PORT=$PORT docker compose -f "$SERVICES_DIR/docker-compose-mock-server.yml" up --detach --build status=$? 
if [ $status -eq 0 ]; then diff --git a/services/show_logs.sh b/services/show_logs.sh index a5b1e04..747134e 100755 --- a/services/show_logs.sh +++ b/services/show_logs.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Directories variables setup (no modification needed) +export SERVICES_DIR=$(dirname "$(readlink -f "$0")") + help() { echo "Usage: $0 " echo " -c : Show capif services" @@ -27,32 +30,36 @@ FILES=() echo "${FILES[@]}" FOLLOW="" +# Needed to avoid write permissions on bind volumes with prometheus and grafana +DUID=$(id -u) +DGID=$(id -g) + # Read params while getopts "cvrahmfs" opt; do case $opt in c) echo "Show Capif services" - FILES+=("-f docker-compose-capif.yml") + FILES+=("-f $SERVICES_DIR/docker-compose-capif.yml") ;; v) echo "Show vault service" - FILES+=("-f docker-compose-vault.yml") + FILES+=("-f $SERVICES_DIR/docker-compose-vault.yml") ;; r) echo "Show register service" - FILES+=("-f docker-compose-register.yml") + FILES+=("-f $SERVICES_DIR/docker-compose-register.yml") ;; s) echo "Show Mock Server service" - FILES+=("-f docker-compose-mock-server.yml") + FILES+=("-f $SERVICES_DIR/docker-compose-mock-server.yml") ;; m) echo "Show monitoring service" - FILES+=("-f ../monitoring/docker-compose.yml") + FILES+=("-f $SERVICES_DIR/monitoring/docker-compose.yml") ;; a) echo "Show all services" - FILES=("-f docker-compose-capif.yml" -f "docker-compose-vault.yml" -f "docker-compose-register.yml" -f "docker-compose-mock-server.yml" -f "../monitoring/docker-compose.yml") + FILES=("-f $SERVICES_DIR/docker-compose-capif.yml" -f "$SERVICES_DIR/docker-compose-vault.yml" -f "$SERVICES_DIR/docker-compose-register.yml" -f "$SERVICES_DIR/docker-compose-mock-server.yml" -f "$SERVICES_DIR./monitoring/docker-compose.yml") ;; f) echo "Setup follow logs" @@ -86,5 +93,5 @@ else help fi -MONITORING=$MONITORING_STATE LOG_LEVEL=$LOG_LEVEL CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 docker compose ${FILES[@]} logs ${FOLLOW} +MONITORING=$MONITORING_STATE LOG_LEVEL=$LOG_LEVEL CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 DUID=$DUID DGID=$DGID docker compose ${FILES[@]} logs ${FOLLOW} -- GitLab From 834cd608ba8aa9c0282abde6b3c6bfcc1b3c9407 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Tue, 17 Sep 2024 10:38:35 +0200 Subject: [PATCH 32/36] Remove requested resources --- helm/capif/charts/fluentbit/values.yaml | 6 +++--- helm/capif/charts/grafana/values.yaml | 6 +++--- helm/capif/charts/loki/values.yaml | 6 +++--- helm/capif/charts/mock-server/values.yaml | 6 +++--- helm/capif/charts/mongo-express/values.yaml | 6 +++--- helm/capif/charts/mongo-register-express/values.yaml | 6 +++--- helm/capif/charts/mongo-register/values.yaml | 6 +++--- helm/capif/charts/mongo/values.yaml | 6 +++--- helm/capif/charts/nginx/values.yaml | 6 +++--- helm/capif/charts/ocf-access-control-policy/values.yaml | 6 +++--- helm/capif/charts/ocf-api-invocation-logs/values.yaml | 6 +++--- helm/capif/charts/ocf-api-invoker-management/values.yaml | 6 +++--- helm/capif/charts/ocf-api-provider-management/values.yaml | 6 +++--- helm/capif/charts/ocf-auditing-api-logs/values.yaml | 6 +++--- helm/capif/charts/ocf-discover-service-api/values.yaml | 6 +++--- helm/capif/charts/ocf-events/values.yaml | 6 +++--- helm/capif/charts/ocf-publish-service-api/values.yaml | 6 +++--- helm/capif/charts/ocf-register/values.yaml | 6 +++--- helm/capif/charts/ocf-routing-info/values.yaml | 6 +++--- helm/capif/charts/ocf-security/values.yaml | 6 +++--- helm/capif/charts/otelcollector/values.yaml | 6 +++--- helm/capif/charts/redis/values.yaml | 6 +++--- 
helm/capif/charts/renderer/values.yaml | 6 +++--- helm/monitoring-stack/charts/grafana/values.yaml | 6 +++--- helm/monitoring-stack/charts/prometheus/values.yaml | 6 +++--- helm/monitoring-stack/charts/skooner/values.yaml | 6 +++--- 26 files changed, 78 insertions(+), 78 deletions(-) diff --git a/helm/capif/charts/fluentbit/values.yaml b/helm/capif/charts/fluentbit/values.yaml index f681d04..f8d35e6 100644 --- a/helm/capif/charts/fluentbit/values.yaml +++ b/helm/capif/charts/fluentbit/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index 9c22900..d9eb4a0 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -83,9 +83,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi autoscaling: enabled: false diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index d546c1f..c90a654 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -76,9 +76,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mock-server/values.yaml b/helm/capif/charts/mock-server/values.yaml index 9fdfd94..e5506eb 100644 --- a/helm/capif/charts/mock-server/values.yaml +++ b/helm/capif/charts/mock-server/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index 36f3a8b..45147b6 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ b/helm/capif/charts/mongo-express/values.yaml @@ -74,9 +74,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index dd225f5..9f21355 100644 --- a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -75,9 +75,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index 7d03654..7bf2693 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -71,9 +71,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index b12b560..db4b443 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -71,9 +71,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index 
db0541e..98ef4c2 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -82,9 +82,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index 9184d26..e95a5da 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index dc63d4b..bbed22b 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -77,9 +77,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index e832c7d..5971f07 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -79,9 +79,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 547bb05..196b9dc 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -79,9 +79,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index 859ba12..b1323a4 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml b/helm/capif/charts/ocf-discover-service-api/values.yaml index 6aa8e61..485b02a 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index b3ca6b0..2c96542 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index ac32a98..e93b53e 100644 --- 
a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -73,9 +73,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index ffa5d50..e24cf75 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -84,9 +84,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index d6c6a3d..1c40a17 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -71,9 +71,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index 2be4288..dcbe5d4 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -77,9 +77,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/otelcollector/values.yaml b/helm/capif/charts/otelcollector/values.yaml index 7b76a17..363c18b 100644 --- a/helm/capif/charts/otelcollector/values.yaml +++ b/helm/capif/charts/otelcollector/values.yaml @@ -77,9 +77,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index 4011e97..e85e73c 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -70,9 +70,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/renderer/values.yaml b/helm/capif/charts/renderer/values.yaml index 23237f3..16f3c15 100644 --- a/helm/capif/charts/renderer/values.yaml +++ b/helm/capif/charts/renderer/values.yaml @@ -72,9 +72,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi livenessProbe: httpGet: diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml index f5f2e37..bd6d0ed 100644 --- a/helm/monitoring-stack/charts/grafana/values.yaml +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -78,9 +78,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index f4b6047..408d35b 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -76,9 +76,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi autoscaling: enabled: false diff --git 
a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml index 9e84807..012054f 100644 --- a/helm/monitoring-stack/charts/skooner/values.yaml +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -66,9 +66,9 @@ resources: limits: cpu: 100m memory: 128Mi - requests: - cpu: 100m - memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi autoscaling: enabled: false -- GitLab From 2aaf389cdc0cb74b3b241b15fcd08b1621bbc6d6 Mon Sep 17 00:00:00 2001 From: Jorge Moratinos Salcines Date: Tue, 17 Sep 2024 10:49:34 +0200 Subject: [PATCH 33/36] Remove resources limits and requests --- helm/capif/charts/fluentbit/values.yaml | 14 +++++++------- helm/capif/charts/grafana/values.yaml | 14 +++++++------- helm/capif/charts/loki/values.yaml | 14 +++++++------- helm/capif/charts/mock-server/values.yaml | 14 +++++++------- helm/capif/charts/mongo-express/values.yaml | 14 +++++++------- .../charts/mongo-register-express/values.yaml | 14 +++++++------- helm/capif/charts/mongo-register/values.yaml | 14 +++++++------- helm/capif/charts/mongo/values.yaml | 14 +++++++------- helm/capif/charts/nginx/values.yaml | 14 +++++++------- .../charts/ocf-access-control-policy/values.yaml | 14 +++++++------- .../charts/ocf-api-invocation-logs/values.yaml | 14 +++++++------- .../charts/ocf-api-invoker-management/values.yaml | 14 +++++++------- .../charts/ocf-api-provider-management/values.yaml | 14 +++++++------- .../capif/charts/ocf-auditing-api-logs/values.yaml | 14 +++++++------- .../charts/ocf-discover-service-api/values.yaml | 14 +++++++------- helm/capif/charts/ocf-events/values.yaml | 14 +++++++------- helm/capif/charts/ocf-helper/values.yaml | 14 +++++++------- .../charts/ocf-publish-service-api/values.yaml | 14 +++++++------- helm/capif/charts/ocf-register/values.yaml | 14 +++++++------- helm/capif/charts/ocf-routing-info/values.yaml | 14 +++++++------- helm/capif/charts/ocf-security/values.yaml | 14 +++++++------- helm/capif/charts/otelcollector/values.yaml | 14 +++++++------- helm/capif/charts/redis/values.yaml | 14 +++++++------- helm/capif/charts/renderer/values.yaml | 14 +++++++------- helm/monitoring-stack/charts/grafana/values.yaml | 14 +++++++------- .../monitoring-stack/charts/prometheus/values.yaml | 14 +++++++------- helm/monitoring-stack/charts/skooner/values.yaml | 14 +++++++------- 27 files changed, 189 insertions(+), 189 deletions(-) diff --git a/helm/capif/charts/fluentbit/values.yaml b/helm/capif/charts/fluentbit/values.yaml index f8d35e6..edaff9d 100644 --- a/helm/capif/charts/fluentbit/values.yaml +++ b/helm/capif/charts/fluentbit/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index d9eb4a0..75096bc 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -75,17 +75,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi autoscaling: enabled: false diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index c90a654..ae05d76 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -68,17 +68,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mock-server/values.yaml b/helm/capif/charts/mock-server/values.yaml index e5506eb..bc603e6 100644 --- a/helm/capif/charts/mock-server/values.yaml +++ b/helm/capif/charts/mock-server/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index 45147b6..c6efe68 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ b/helm/capif/charts/mongo-express/values.yaml @@ -66,17 +66,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index 9f21355..a8be69d 100644 --- a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -67,17 +67,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index 7bf2693..2aa2505 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index db4b443..aebdcdf 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index 98ef4c2..6056af8 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -74,17 +74,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index e95a5da..4f5ea20 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index bbed22b..63b9ed7 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index 5971f07..77e81e7 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -71,17 +71,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 196b9dc..147412b 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -71,17 +71,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index b1323a4..38ecd36 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml b/helm/capif/charts/ocf-discover-service-api/values.yaml index 485b02a..f1bde0b 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index 2c96542..cf51dea 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-helper/values.yaml b/helm/capif/charts/ocf-helper/values.yaml index 6062246..71a8b98 100644 --- a/helm/capif/charts/ocf-helper/values.yaml +++ b/helm/capif/charts/ocf-helper/values.yaml @@ -72,17 +72,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index e93b53e..f420aa4 100644 --- a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index e24cf75..4ce2d64 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -76,17 +76,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index 1c40a17..aef81f5 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index dcbe5d4..765b7c7 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/otelcollector/values.yaml b/helm/capif/charts/otelcollector/values.yaml index 363c18b..678d958 100644 --- a/helm/capif/charts/otelcollector/values.yaml +++ b/helm/capif/charts/otelcollector/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index e85e73c..2bae357 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -62,17 +62,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/renderer/values.yaml b/helm/capif/charts/renderer/values.yaml index 16f3c15..d2d3e77 100644 --- a/helm/capif/charts/renderer/values.yaml +++ b/helm/capif/charts/renderer/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi livenessProbe: httpGet: diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml index bd6d0ed..43105a9 100644 --- a/helm/monitoring-stack/charts/grafana/values.yaml +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -70,17 +70,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index 408d35b..633077e 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -68,17 +68,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 100m - memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml index 012054f..a1bc892 100644 --- a/helm/monitoring-stack/charts/skooner/values.yaml +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -58,17 +58,17 @@ ingress: # hosts: # - chart-example.local -resources: +resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
-  limits:
-    cpu: 100m
-    memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
+# limits:
+#   cpu: 100m
+#   memory: 128Mi
+# requests:
+#   cpu: 100m
+#   memory: 128Mi
 
 autoscaling:
   enabled: false
--
GitLab

From 3792236581f4de86b2f2386ae07694cea170d5df Mon Sep 17 00:00:00 2001
From: Jorge Moratinos Salcines
Date: Tue, 17 Sep 2024 13:55:06 +0200
Subject: [PATCH 34/36] Update run remote capif tests scripts

---
 helm/scripts/run_remote_capif_tests.sh | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/helm/scripts/run_remote_capif_tests.sh b/helm/scripts/run_remote_capif_tests.sh
index 37bba19..6f67bb5 100755
--- a/helm/scripts/run_remote_capif_tests.sh
+++ b/helm/scripts/run_remote_capif_tests.sh
@@ -30,6 +30,13 @@ echo "CAPIF_VAULT_PORT = $VAULT_PORT"
 echo "CAPIF_VAULT_TOKEN = $VAULT_ACCESS_TOKEN"
 echo "MOCK_SERVER_URL = $MOCK_SERVER_URL"
 
+INPUT_OPTIONS=$@
+# Check if input is provided
+if [ -z "$1" ]; then
+    # Set default value if no input is provided
+    INPUT_OPTIONS="--include all"
+fi
+
 cd $CAPIF_BASE_DIR
 
 docker >/dev/null 2>/dev/null
@@ -68,4 +75,4 @@ docker run -ti --rm --network="host" \
     --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \
     --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \
     --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \
-    --variable MOCK_SERVER_URL:$MOCK_SERVER_URL $@
+    --variable MOCK_SERVER_URL:$MOCK_SERVER_URL $INPUT_OPTIONS
--
GitLab

From 4612f15437d05b23c1ee994ca3c02c3dca3c3b8a Mon Sep 17 00:00:00 2001
From: Jorge Moratinos Salcines
Date: Thu, 19 Sep 2024 11:40:17 +0200
Subject: [PATCH 35/36] Set default requests and limits on variables according to the minimum valid value

---
 helm/scripts/variables.sh | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh
index 0f5f030..02756d0 100755
--- a/helm/scripts/variables.sh
+++ b/helm/scripts/variables.sh
@@ -59,10 +59,10 @@ export MONITORING_SNOOKER_ENABLED=false
 
 # OpenCAPIF deployment variables
 export CAPIF_RESOURCES_RESERVE="YES"
-export CAPIF_RESOURCES_LIMITS_CPU=100m
-export CAPIF_RESOURCES_LIMITS_MEMORY=128Mi
-export CAPIF_RESOURCES_REQUESTS_CPU=100m
-export CAPIF_RESOURCES_REQUESTS_MEMORY=128Mi
+export CAPIF_RESOURCES_LIMITS_CPU=200m
+export CAPIF_RESOURCES_LIMITS_MEMORY=256Mi
+export CAPIF_RESOURCES_REQUESTS_CPU=1m
+export CAPIF_RESOURCES_REQUESTS_MEMORY=1Mi
 ## Storage Class
 export CAPIF_STORAGE_CLASS=nfs-01
 export CAPIF_STORAGE_ACCESS_MODE="ReadWriteMany"
@@ -177,8 +177,9 @@ if [ "$CAPIF_RESOURCES_RESERVE" == "NO" ]; then
     ${SED_CMD} -i "s/^ memory:/# memory:/g" $HELM_DIR/**/**/**/values.yaml
     ${SED_CMD} -i "s/^ requests:/# requests:/g" $HELM_DIR/**/**/**/values.yaml
 else
+    CAPIF_RESOURCES_RESERVE=""
     for service in "${capif_services[@]}"; do
-        CAPIF_RESOURCES_RESERVE="--set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU
+        CAPIF_RESOURCES_RESERVE="$CAPIF_RESOURCES_RESERVE --set $service.resources.limits.cpu=$CAPIF_RESOURCES_LIMITS_CPU
         --set $service.resources.limits.memory=$CAPIF_RESOURCES_LIMITS_MEMORY
         --set $service.resources.requests.cpu=$CAPIF_RESOURCES_REQUESTS_CPU
         --set $service.resources.requests.memory=$CAPIF_RESOURCES_REQUESTS_MEMORY "
--
GitLab

From 8ddc1ce7fe03ec519827e5b3900b2d4bb05857fb Mon Sep 17 00:00:00 2001
From: Jorge Moratinos Salcines
Date: Thu, 19 Sep 2024 11:51:13 +0200
Subject: [PATCH 36/36] Default values for resource requests and limits were set

---
 helm/capif/charts/fluentbit/values.yaml | 14 +++++++-------
 helm/capif/charts/grafana/values.yaml | 14 +++++++-------
helm/capif/charts/loki/values.yaml | 14 +++++++------- helm/capif/charts/mock-server/values.yaml | 14 +++++++------- helm/capif/charts/mongo-express/values.yaml | 14 +++++++------- .../charts/mongo-register-express/values.yaml | 14 +++++++------- helm/capif/charts/mongo-register/values.yaml | 14 +++++++------- helm/capif/charts/mongo/values.yaml | 14 +++++++------- helm/capif/charts/nginx/values.yaml | 14 +++++++------- .../charts/ocf-access-control-policy/values.yaml | 14 +++++++------- .../charts/ocf-api-invocation-logs/values.yaml | 14 +++++++------- .../charts/ocf-api-invoker-management/values.yaml | 14 +++++++------- .../charts/ocf-api-provider-management/values.yaml | 14 +++++++------- .../capif/charts/ocf-auditing-api-logs/values.yaml | 14 +++++++------- .../charts/ocf-discover-service-api/values.yaml | 14 +++++++------- helm/capif/charts/ocf-events/values.yaml | 14 +++++++------- helm/capif/charts/ocf-helper/values.yaml | 14 +++++++------- .../charts/ocf-publish-service-api/values.yaml | 14 +++++++------- helm/capif/charts/ocf-register/values.yaml | 14 +++++++------- helm/capif/charts/ocf-routing-info/values.yaml | 14 +++++++------- helm/capif/charts/ocf-security/values.yaml | 14 +++++++------- helm/capif/charts/otelcollector/values.yaml | 14 +++++++------- helm/capif/charts/redis/values.yaml | 14 +++++++------- helm/capif/charts/renderer/values.yaml | 14 +++++++------- helm/monitoring-stack/charts/grafana/values.yaml | 14 +++++++------- .../monitoring-stack/charts/prometheus/values.yaml | 14 +++++++------- helm/monitoring-stack/charts/skooner/values.yaml | 14 +++++++------- 27 files changed, 189 insertions(+), 189 deletions(-) diff --git a/helm/capif/charts/fluentbit/values.yaml b/helm/capif/charts/fluentbit/values.yaml index edaff9d..cd9ae42 100644 --- a/helm/capif/charts/fluentbit/values.yaml +++ b/helm/capif/charts/fluentbit/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/grafana/values.yaml b/helm/capif/charts/grafana/values.yaml index 75096bc..5cae1de 100644 --- a/helm/capif/charts/grafana/values.yaml +++ b/helm/capif/charts/grafana/values.yaml @@ -75,17 +75,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
-# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi autoscaling: enabled: false diff --git a/helm/capif/charts/loki/values.yaml b/helm/capif/charts/loki/values.yaml index ae05d76..b7bbadc 100644 --- a/helm/capif/charts/loki/values.yaml +++ b/helm/capif/charts/loki/values.yaml @@ -68,17 +68,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mock-server/values.yaml b/helm/capif/charts/mock-server/values.yaml index bc603e6..058c7fe 100644 --- a/helm/capif/charts/mock-server/values.yaml +++ b/helm/capif/charts/mock-server/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/mongo-express/values.yaml b/helm/capif/charts/mongo-express/values.yaml index c6efe68..76ce47e 100644 --- a/helm/capif/charts/mongo-express/values.yaml +++ b/helm/capif/charts/mongo-express/values.yaml @@ -66,17 +66,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register-express/values.yaml b/helm/capif/charts/mongo-register-express/values.yaml index a8be69d..f174fbd 100644 --- a/helm/capif/charts/mongo-register-express/values.yaml +++ b/helm/capif/charts/mongo-register-express/values.yaml @@ -67,17 +67,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
-# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo-register/values.yaml b/helm/capif/charts/mongo-register/values.yaml index 2aa2505..d52301d 100644 --- a/helm/capif/charts/mongo-register/values.yaml +++ b/helm/capif/charts/mongo-register/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/mongo/values.yaml b/helm/capif/charts/mongo/values.yaml index aebdcdf..6e15c56 100644 --- a/helm/capif/charts/mongo/values.yaml +++ b/helm/capif/charts/mongo/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/nginx/values.yaml b/helm/capif/charts/nginx/values.yaml index 6056af8..43e1d05 100644 --- a/helm/capif/charts/nginx/values.yaml +++ b/helm/capif/charts/nginx/values.yaml @@ -74,17 +74,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-access-control-policy/values.yaml b/helm/capif/charts/ocf-access-control-policy/values.yaml index 4f5ea20..1f2ce41 100644 --- a/helm/capif/charts/ocf-access-control-policy/values.yaml +++ b/helm/capif/charts/ocf-access-control-policy/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
-# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invocation-logs/values.yaml b/helm/capif/charts/ocf-api-invocation-logs/values.yaml index 63b9ed7..756ccbf 100644 --- a/helm/capif/charts/ocf-api-invocation-logs/values.yaml +++ b/helm/capif/charts/ocf-api-invocation-logs/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-invoker-management/values.yaml b/helm/capif/charts/ocf-api-invoker-management/values.yaml index 77e81e7..4b19037 100644 --- a/helm/capif/charts/ocf-api-invoker-management/values.yaml +++ b/helm/capif/charts/ocf-api-invoker-management/values.yaml @@ -71,17 +71,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-api-provider-management/values.yaml b/helm/capif/charts/ocf-api-provider-management/values.yaml index 147412b..1822526 100644 --- a/helm/capif/charts/ocf-api-provider-management/values.yaml +++ b/helm/capif/charts/ocf-api-provider-management/values.yaml @@ -71,17 +71,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-auditing-api-logs/values.yaml b/helm/capif/charts/ocf-auditing-api-logs/values.yaml index 38ecd36..2c0231e 100644 --- a/helm/capif/charts/ocf-auditing-api-logs/values.yaml +++ b/helm/capif/charts/ocf-auditing-api-logs/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-discover-service-api/values.yaml b/helm/capif/charts/ocf-discover-service-api/values.yaml index f1bde0b..1d1c5d6 100644 --- a/helm/capif/charts/ocf-discover-service-api/values.yaml +++ b/helm/capif/charts/ocf-discover-service-api/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-events/values.yaml b/helm/capif/charts/ocf-events/values.yaml index cf51dea..9fb7b06 100644 --- a/helm/capif/charts/ocf-events/values.yaml +++ b/helm/capif/charts/ocf-events/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-helper/values.yaml b/helm/capif/charts/ocf-helper/values.yaml index 71a8b98..8a30745 100644 --- a/helm/capif/charts/ocf-helper/values.yaml +++ b/helm/capif/charts/ocf-helper/values.yaml @@ -72,17 +72,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/ocf-publish-service-api/values.yaml b/helm/capif/charts/ocf-publish-service-api/values.yaml index f420aa4..0e243db 100644 --- a/helm/capif/charts/ocf-publish-service-api/values.yaml +++ b/helm/capif/charts/ocf-publish-service-api/values.yaml @@ -65,17 +65,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-register/values.yaml b/helm/capif/charts/ocf-register/values.yaml index 4ce2d64..71b49d9 100644 --- a/helm/capif/charts/ocf-register/values.yaml +++ b/helm/capif/charts/ocf-register/values.yaml @@ -76,17 +76,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-routing-info/values.yaml b/helm/capif/charts/ocf-routing-info/values.yaml index aef81f5..b92b884 100644 --- a/helm/capif/charts/ocf-routing-info/values.yaml +++ b/helm/capif/charts/ocf-routing-info/values.yaml @@ -63,17 +63,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/ocf-security/values.yaml b/helm/capif/charts/ocf-security/values.yaml index 765b7c7..cdccc70 100644 --- a/helm/capif/charts/ocf-security/values.yaml +++ b/helm/capif/charts/ocf-security/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/otelcollector/values.yaml b/helm/capif/charts/otelcollector/values.yaml index 678d958..e7e0e5f 100644 --- a/helm/capif/charts/otelcollector/values.yaml +++ b/helm/capif/charts/otelcollector/values.yaml @@ -69,17 +69,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: # httpGet: diff --git a/helm/capif/charts/redis/values.yaml b/helm/capif/charts/redis/values.yaml index 2bae357..ba9a6d2 100644 --- a/helm/capif/charts/redis/values.yaml +++ b/helm/capif/charts/redis/values.yaml @@ -62,17 +62,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: tcpSocket: diff --git a/helm/capif/charts/renderer/values.yaml b/helm/capif/charts/renderer/values.yaml index d2d3e77..1270291 100644 --- a/helm/capif/charts/renderer/values.yaml +++ b/helm/capif/charts/renderer/values.yaml @@ -64,17 +64,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi livenessProbe: httpGet: diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml index 43105a9..5c54eed 100644 --- a/helm/monitoring-stack/charts/grafana/values.yaml +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -70,17 +70,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml index 633077e..9d35e89 100644 --- a/helm/monitoring-stack/charts/prometheus/values.yaml +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -68,17 +68,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. 
If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi autoscaling: enabled: false diff --git a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml index a1bc892..ea8b353 100644 --- a/helm/monitoring-stack/charts/skooner/values.yaml +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -58,17 +58,17 @@ ingress: # hosts: # - chart-example.local -resources: {} +resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 1m + memory: 1Mi autoscaling: enabled: false -- GitLab
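
With CAPIF_RESOURCES_RESERVE left at "YES", the loop in helm/scripts/variables.sh now accumulates one group of --set overrides per service, so every subchart receives the same defaults introduced above (limits of 200m CPU / 256Mi memory, requests of 1m CPU / 1Mi memory). A minimal sketch of the kind of Helm command those overrides end up on is shown below; the release name, namespace and chart path are illustrative assumptions, not values taken from these patches:

    # Hypothetical invocation: override the resource defaults of one subchart (ocf-register)
    # at install time. Release name "capif", namespace "capif" and the chart path are assumptions.
    helm upgrade --install capif ./helm/capif --namespace capif \
      --set ocf-register.resources.limits.cpu=200m \
      --set ocf-register.resources.limits.memory=256Mi \
      --set ocf-register.resources.requests.cpu=1m \
      --set ocf-register.resources.requests.memory=1Mi

The same --set pattern repeats for each entry of capif_services, which is why the loop appends to CAPIF_RESOURCES_RESERVE instead of overwriting it.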