diff --git a/helm/DELETE.txt b/helm/DELETE.txt deleted file mode 100644 index 2d030d7bc1bbfbdee332aaf691447b30cdea375b..0000000000000000000000000000000000000000 --- a/helm/DELETE.txt +++ /dev/null @@ -1 +0,0 @@ -delete me diff --git a/helm/helmfile-capif.yaml b/helm/helmfile-capif.yaml deleted file mode 100644 index 610e64a4d19b92696fa23bc3ecf9dd698546a5b5..0000000000000000000000000000000000000000 --- a/helm/helmfile-capif.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# helm upgrade --install -n mon monitoring-capif capif/ --set nginx.nginx.env.capifHostname=mon-capif.monitoring.int \ -# --set ingress_ip.oneke="10.17.173.127" --set env=oneke --atomic -helmDefaults: - createNamespace: true - timeout: 600 -releases: - - name: monitoring-capif - chart: ./capif/ - namespace: monitoring -# atomic: true - wait: true - values: - - ./capif/values.yaml - - nginx: - nginx: - env: - capifHostname: monitoring-capif.monitoring.int - - ingress: - ip: "10.17.173.127" \ No newline at end of file diff --git a/helm/monitoring-stack/Chart.yaml b/helm/monitoring-stack/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb7335de510097458839554f6a6bad2cbacca226 --- /dev/null +++ b/helm/monitoring-stack/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: Helm monitoring stack +description: A Helm chart for Kubernetes monitoring stack +type: application +version: 1.0.0 +appVersion: "1.0.0" + +dependencies: + - name: grafana + version: "*" + condition: grafana.enabled + - name: prometheus + version: "*" + condition: prometheus.enabled + - name: skooner + version: "*" + condition: skooner.enabled + - name: metrics-server + repository: 'https://charts.bitnami.com/bitnami' + version: 6.9.3 + condition: metrics-server.enabled \ No newline at end of file diff --git a/helm/monitoring-stack/README.md b/helm/monitoring-stack/README.md new file mode 100644 index 0000000000000000000000000000000000000000..675d14dbb77d3204a9e88ecb71bc026f6e82e88d --- /dev/null +++ b/helm/monitoring-stack/README.md @@ -0,0 +1,43 @@ +# Helm monitoring stack + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) + +A Helm chart for Kubernetes monitoring stack + +## Requirements + +``` +$ helm dependency build helm-monitoring-stack/ +$ helm upgrade --install -n monitoring monitoring helm-monitoring-stack/ --create-namespace +``` + + +| Repository | Name | Version | +|------------|------|---------| +| | grafana | | +| | prometheus | | +| | skooner | | +| https://charts.bitnami.com/bitnami | metrics-server | 6.9.3 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| grafana.hosts[0].host | string | `"grafana-dt.tactile5g.int"` | | +| grafana.hosts[0].paths[0].path | string | `"/"` | | +| grafana.hosts[0].paths[0].pathType | string | `"Prefix"` | | +| grafana.ingress.enable | bool | `true` | | +| metrics-server.apiService.create | bool | `true` | | +| metrics-server.extraArgs[0] | string | `"--kubelet-insecure-tls=true"` | | +| metrics-server.extraArgs[1] | string | `"--kubelet-preferred-address-types=InternalIP"` | | +| prometheus.hosts[0].host | string | `"prometheus-dt.tactile5g.int"` | | +| prometheus.hosts[0].paths[0].path | string | `"/"` | | +| prometheus.hosts[0].paths[0].pathType | string | `"Prefix"` | | +| prometheus.ingress.enabled | 
bool | `true` | | +| skooner.hosts[0].host | string | `"skooner.tactile5g.com"` | | +| skooner.hosts[0].paths[0].path | string | `"/"` | | +| skooner.hosts[0].paths[0].pathType | string | `"Prefix"` | | +| skooner.ingress.enabled | bool | `true` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/helm/monitoring-stack/charts/grafana/.helmignore b/helm/monitoring-stack/charts/grafana/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/monitoring-stack/charts/grafana/Chart.yaml b/helm/monitoring-stack/charts/grafana/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c7c54e5d4e37c3a84aea8825c5e447a5bc6a7d3 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: grafana +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/helm/monitoring-stack/charts/grafana/README.md b/helm/monitoring-stack/charts/grafana/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5325ad0dd72a30d59b8293e64d5a5f624857d0c9 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/README.md @@ -0,0 +1,51 @@ +# grafana + +![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square) + +A Helm chart for Kubernetes + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `100` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| env.gfAuthAnonymousEnable | bool | `true` | | +| env.gfAuthAnonymousOrgRole | string | `"Admin"` | | +| env.gfSecurityAdminPassword | string | `"secure_pass"` | | +| env.gfSecurityAllowEmbedding | bool | `true` | | +| env.prometheusUrl | string | `"http://prometheus.prometheus-system.svc.cluster.local:9090"` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"Always"` | | +| image.repository | string | `"grafana/grafana"` | | +| image.tag | string | `"latest"` | | +| imagePullSecrets | list | `[]` | | +| ingress.annotations | object | `{}` | | +| ingress.className | string | `"nginx"` | | +| ingress.enabled | bool | `true` | | +| ingress.environment | string | `"edge"` | | +| ingress.hosts[0].host | string | `"grafana-dt.tactile5g.int"` | | +| ingress.hosts[0].paths[0].path | string | `"/"` | | +| ingress.hosts[0].paths[0].pathType | string | `"Prefix"` | | +| ingress.tls | list | `[]` | | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| persistence.enable | bool | `false` | | +| persistence.storage | string | `"10Gi"` | | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| securityContext | object | `{}` | | +| service.port | int | `80` | | +| service.type | string | `"NodePort"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tolerations | list | `[]` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/helm/monitoring-stack/charts/grafana/kubernetes-dashboard.json b/helm/monitoring-stack/charts/grafana/kubernetes-dashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..ac97f80ebd0139f383c3f82920b9b9aec7b8c1e9 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/kubernetes-dashboard.json @@ -0,0 +1,2629 @@ +{ + "annotations": { + "list": [ + { + "$$hashKey": "object:103", + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Monitors Kubernetes cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. 
Uses cAdvisor metrics only.", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 12740, + "graphTooltip": 0, + "id": 7, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 33, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Network I/O pressure", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 1 + }, + "height": "200px", + "hiddenSeries": false, + "id": 32, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Network I/O pressure", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "show": true + }, + { + "format": "Bps", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 34, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Total usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": 
"rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 4, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Cluster memory usage", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 8 + }, + "id": 6, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Cluster CPU usage (1m avg)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 7, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum 
(container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 10 + } + ], + "title": "Cluster filesystem usage", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 13 + }, + "id": 9, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Used", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 13 + }, + "id": 10, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Total", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 13 + }, + "id": 11, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + 
"textMode": "auto" + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m]))", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Used", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 13 + }, + "id": 12, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Total", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 13 + }, + "id": 13, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Used", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 13 + }, + "id": 14, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + 
"orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.0.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Total", + "type": "stat" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 35, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Pods CPU usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 3, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 17 + }, + "height": "", + "hiddenSeries": false, + "id": 17, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "editorMode": "code", + "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (pod)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ pod }}", + "metric": "container_cpu", + "range": true, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Pods CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:112", + "format": "none", + "label": "cores", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:113", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 39, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Pods memory usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 
0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 25 + }, + "hiddenSeries": false, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "editorMode": "code", + "expr": "sum (container_memory_working_set_bytes{image!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}) by (pod)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ pod }}", + "metric": "container_memory_usage:sort_desc", + "range": true, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Pods memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:181", + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:182", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 43, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Pods network I/O", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 33 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "editorMode": "code", + "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (pod)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ pod }}", + "metric": "network", + "range": true, + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + 
"expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (pod)", + "hide": true, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- {{ pod }}", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Pods network I/O (1m avg)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 37, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 3, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 41 + }, + "height": "", + "hiddenSeries": false, + "id": 24, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container!=\"POD\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (container, pod)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod }}| {{ container }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "container_cpu", + "refId": "B", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "container_cpu", + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + 
"title": "Containers CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:337", + "format": "none", + "label": "cores", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:338", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Containers CPU usage", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 41 + }, + "id": 41, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 42 + }, + "hiddenSeries": false, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container!=\"POD\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}) by (container, pod)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "pod: {{ pod }} | {{ container }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}) by (kubernetes_io_hostname, name, image)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "container_memory_usage:sort_desc", + "refId": "B", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}) by (kubernetes_io_hostname, rkt_container_name)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Containers memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + 
"xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:406", + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:407", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Containers memory usage", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 42 + }, + "id": 44, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 43 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (container, pod)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> pod: {{ pod }} | {{ container }}", + "metric": "network", + "refId": "B", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (container, pod)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- pod: {{ pod }} | {{ container }}", + "metric": "network", + "refId": "D", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (kubernetes_io_hostname, name, image)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image 
}} ({{ name }})", + "metric": "network", + "refId": "C", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "E", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\",namespace=~\"^$namespace$\"}[1m])) by (kubernetes_io_hostname, rkt_container_name)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}", + "metric": "network", + "refId": "F", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Containers network I/O (1m avg)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Containers network I/O", + "type": "row" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 36, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "System services CPU usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 3, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 44 + }, + "height": "", + "hiddenSeries": false, + "id": 23, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "editorMode": "code", + "expr": "sum (rate (container_cpu_usage_seconds_total{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (systemd_service_name)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ systemd_service_name }}", + "metric": "container_cpu", + "range": true, + "refId": 
"A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "System services CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 51 + }, + "id": 40, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 29 + }, + "hiddenSeries": false, + "id": 26, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ systemd_service_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "System services memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "System services memory usage", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 38, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 3, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 46 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + 
"linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.0.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "All processes CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:254", + "format": "none", + "label": "cores", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:255", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "All processes CPU usage", + "type": "row" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 53 + }, + "id": 42, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 47 + }, + "hiddenSeries": false, + "id": 28, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "All processes memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "All processes memory usage", + "type": "row" + }, + { + "collapsed": true, + 
"datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 45, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 29, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ id }}", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- {{ id }}", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "All processes network I/O (1m avg)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "All processes network I/O", + "type": "row" + } + ], + "refresh": "10s", + "schemaVersion": 38, + "style": "dark", + "tags": [ + "kubernetes" + ], + "templating": { + "list": [ + { + "allValue": "", + "current": { + "selected": true, + "text": "monitoring", + "value": "monitoring" + }, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "definition": "label_values(namespace)", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "af6b44aa-0703-4979-825c-c1afba946534" + }, + "definition": "", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "Node", + "options": [], + "query": "label_values(kubernetes_io_hostname)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": 
"", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Monitoring Dashboard", + "uid": "msqzbWjWk", + "version": 2, + "weekStart": "" + } \ No newline at end of file diff --git a/helm/monitoring-stack/charts/grafana/templates/NOTES.txt b/helm/monitoring-stack/charts/grafana/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..c7ac87e80664a0f6ec0466ce8fda3d4ef8c08758 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "grafana.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/helm/monitoring-stack/charts/grafana/templates/_helpers.tpl b/helm/monitoring-stack/charts/grafana/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..993f46bdd8df0131780dfe58e5019bdb8e29eb53 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/grafana/templates/configmap.yaml b/helm/monitoring-stack/charts/grafana/templates/configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91740d481e21233059799559d3b3cc46522abd62 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: datasources +data: + datasources.yaml: | + apiVersion: 1 + datasources: + - name: Prometheus + type: prometheus + typeName: Prometheus + typeLogoUrl: public/app/plugins/datasource/prometheus/img/prometheus_logo.svg + access: proxy + url: {{ .Values.env.prometheusUrl }} + uid: af6b44aa-0703-4979-825c-c1afba946534 + user: '' + database: '' + basicAuth: false + isDefault: false + jsonData: + httpMethod: POST + prometheusType: Prometheus + prometheusVersion: 2.40.1 + readOnly: false +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: default +data: + default.yaml: | + apiVersion: 1 + providers: + - name: Default # A uniquely identifiable name for the provider + orgId: 1 + folder: "" # The folder where to place the dashboards + folderUid: "" + type: file + disableDeletion: false + allowUiUpdates: true + options: + path: /var/lib/grafana/dashboards + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubernetes-dashboard +data: + kubernetes-dashboard.json: | +{{ .Files.Get "kubernetes-dashboard.json" | indent 4 }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/grafana/templates/deployment.yaml b/helm/monitoring-stack/charts/grafana/templates/deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f720c65243c4a2cc7e1c7cf642bbd62108cf032 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/deployment.yaml @@ -0,0 +1,102 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + strategy: + type: Recreate + {{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "grafana.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - env: + - name: GF_AUTH_ANONYMOUS_ENABLED + value: {{ quote .Values.env.gfAuthAnonymousEnable }} + - name: GF_SECURITY_ALLOW_EMBEDDING + value: {{ quote .Values.env.gfSecurityAllowEmbedding }} + - name: GF_PATHS_PROVISIONING + value: /etc/grafana/provisioning + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }} + envFrom: + - secretRef: + name: grafana-secrets + ports: + - name: http + containerPort: 3000 + protocol: TCP +# livenessProbe: +# tcpSocket: +# port: {{ .Values.service.port }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: grafana-datasources + mountPath: /etc/grafana/provisioning/datasources/datasources.yaml + subPath: datasources.yaml + - name: grafana-default + mountPath: /etc/grafana/provisioning/dashboards/default.yaml + subPath: default.yaml + - name: kubernetes-dashboard + mountPath: /var/lib/grafana/dashboards/kubernetes-dashboard.json + subPath: kubernetes-dashboard.json + {{- if .Values.persistence.enable | default false }} + - name: grafana-claim0 + mountPath: /var/lib/grafana + {{- end }} + volumes: + - name: grafana-datasources + configMap: + name: datasources + items: + - key: "datasources.yaml" + path: "datasources.yaml" + - name: grafana-default + configMap: + name: default + items: + - key: "default.yaml" + path: "default.yaml" + - name: kubernetes-dashboard + configMap: + name: kubernetes-dashboard + items: + - key: "kubernetes-dashboard.json" + path: "kubernetes-dashboard.json" + {{- if .Values.persistence.enable | default false }} + - name: grafana-claim0 + persistentVolumeClaim: + claimName: grafana-claim0 + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/grafana/templates/hpa.yaml b/helm/monitoring-stack/charts/grafana/templates/hpa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2384525742019bcbad67da4ad13393467e8bad50 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/grafana/templates/ingress.yaml b/helm/monitoring-stack/charts/grafana/templates/ingress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc17ff2f53dc3312d0c94e8302d9700a48e0f31e --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/ingress.yaml @@ -0,0 +1,99 @@ +{{- if .Values.ingress.enabled -}} +{{- if eq .Values.ingress.environment "edge" }} +{{- $fullName := include "grafana.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName | trunc 30 }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{- if .Values.ingress.enabled }} +{{- if or (eq .Values.ingress.environment "region") (eq .Values.ingress.environment "wavelength") }} +{{- $fullName := include "grafana.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName | trunc 30 }} + namespace: {{ .Release.Namespace }} + annotations: + alb.ingress.kubernetes.io/load-balancer-name: {{ $fullName | trunc 30 }} + alb.ingress.kubernetes.io/target-type: instance + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/tags: tactile5g/digital-twins={{ .Release.Namespace }} +spec: + ingressClassName: alb + defaultBackend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/grafana/templates/secrets.yaml b/helm/monitoring-stack/charts/grafana/templates/secrets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7cc34fafa8c021b449500f071a3d6777ec2a7786 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/secrets.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: grafana-secrets +type: Opaque +data: + GF_AUTH_ANONYMOUS_ORG_ROLE: {{ .Values.env.gfAuthAnonymousOrgRole | b64enc | quote }} + GF_SECURITY_ADMIN_PASSWORD: {{ .Values.env.gfSecurityAdminPassword | b64enc | quote }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/grafana/templates/service.yaml b/helm/monitoring-stack/charts/grafana/templates/service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98de07507a62ea7a39712a8118d0f8505cceced9 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} diff --git a/helm/monitoring-stack/charts/grafana/templates/serviceaccount.yaml b/helm/monitoring-stack/charts/grafana/templates/serviceaccount.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47782b5cd7ecf125c47c15031fc6b8ce309e1174 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "grafana.serviceAccountName" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/grafana/templates/tests/test-connection.yaml b/helm/monitoring-stack/charts/grafana/templates/tests/test-connection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..805404c03297ca35941f4dbe5b3023381aa9de61 --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "grafana.fullname" . }}-test-connection" + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "grafana.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/monitoring-stack/charts/grafana/values.yaml b/helm/monitoring-stack/charts/grafana/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b519b9a4965a2fecc341e5777a74de66427cebd --- /dev/null +++ b/helm/monitoring-stack/charts/grafana/values.yaml @@ -0,0 +1,96 @@ +# Default values for grafana. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: grafana/grafana + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +# PENDING: gfAuthAnonymousOrgRole and gfSecurityAdminPassword as aws external-secret +env: + gfAuthAnonymousEnable: true + gfSecurityAllowEmbedding: true + gfAuthAnonymousOrgRole: Admin + gfSecurityAdminPassword: secure_pass + prometheusUrl: http://prometheus.prometheus-system.svc.cluster.local:9090 + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +persistence: + enable: false + storage: 10Gi + +service: + type: NodePort + port: 80 + +ingress: + enabled: true + className: "nginx" + # edge, region, wavelength + environment: "edge" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: grafana-dt.tactile5g.int + paths: + - path: / + pathType: Prefix + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/helm/monitoring-stack/charts/prometheus/.helmignore b/helm/monitoring-stack/charts/prometheus/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/monitoring-stack/charts/prometheus/Chart.yaml b/helm/monitoring-stack/charts/prometheus/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..29cd94126b2e91cfbb88923ad89df12a08fa4976 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: prometheus +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/helm/monitoring-stack/charts/prometheus/README.md b/helm/monitoring-stack/charts/prometheus/README.md new file mode 100644 index 0000000000000000000000000000000000000000..238189ff5b1c39396b5c47c0d9ec871dafe164f3 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/README.md @@ -0,0 +1,48 @@ +# prometheus + +![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square) + +A Helm chart for Kubernetes + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `100` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"Always"` | | +| image.repository | string | `"prom/prometheus"` | | +| image.tag | string | `"latest"` | | +| imagePullSecrets | list | `[]` | | +| ingress.annotations | object | `{}` | | +| ingress.className | string | `"nginx"` | | +| ingress.enabled | bool | `false` | | +| ingress.environment | string | `"edge"` | | +| ingress.hosts[0].host | string | `"prometheus-dt.tactile5g.int"` | | +| ingress.hosts[0].paths[0].path | string | `"/"` | | +| ingress.hosts[0].paths[0].pathType | string | `"Prefix"` | | +| ingress.tls | list | `[]` | | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| persistence.enable | bool | `false` | | +| persistence.storage | string | `"10Gi"` | | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | | +| replicaCount | int | `1` | 
| +| resources | object | `{}` | | +| securityContext | object | `{}` | | +| service.port | int | `9090` | | +| service.type | string | `"NodePort"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `"prometheus"` | | +| tolerations | list | `[]` | | +| tsdb.path | string | `"/prometheus/"` | | +| tsdb.retentionTime | string | `"5d"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/helm/monitoring-stack/charts/prometheus/templates/NOTES.txt b/helm/monitoring-stack/charts/prometheus/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..67b2c0ec40898320933d3c1c0b71e026fabc991d --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prometheus.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/helm/monitoring-stack/charts/prometheus/templates/_helpers.tpl b/helm/monitoring-stack/charts/prometheus/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..42af2347d259affaf057e8ad836ff7bca686c703 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
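+Note that the Deployment, Service and Ingress templates use this generated name, while the ClusterRole/ClusterRoleBinding ("prometheus"), ConfigMap ("prometheus-config") and PersistentVolumeClaim ("prometheus-pvc") carry fixed names, so a second release of this chart would collide on those fixed-name resources.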
+*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "prometheus.labels" -}} +helm.sh/chart: {{ include "prometheus.chart" . }} +{{ include "prometheus.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "prometheus.selectorLabels" -}} +app.kubernetes.io/name: {{ include "prometheus.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "prometheus.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "prometheus.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/prometheus/templates/cluster-role.yaml b/helm/monitoring-stack/charts/prometheus/templates/cluster-role.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ff7fe8f0f632807432dc1622fdc70774eefdd9e3 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/cluster-role.yaml @@ -0,0 +1,45 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus + labels: + app: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: + - extensions + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus + namespace: {{ .Release.Namespace }} + labels: + app: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus + labels: + app: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/prometheus/templates/configmap.yaml b/helm/monitoring-stack/charts/prometheus/templates/configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e7b6796dd53c47f9f2b8ec8a19246a7e6c7d315 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/configmap.yaml @@ -0,0 +1,137 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: prometheus + name: prometheus-config +data: + prometheus.rules: |- + groups: + - name: devopscube alert + rules: + - alert: High Pod Memory + expr: sum(container_memory_usage_bytes) > 1 + for: 1m + labels: + severity: slack + annotations: + summary: High Memory Usage + prometheus.yml: |- + global: + scrape_interval: 30s + scrape_timeout: 10s + scrape_configs: + #------------- configuration to collect pods metrics 
from kubelet ------------------- + - job_name: 'kubernetes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor + #------------- configuration to collect pods metrics ------------------- + - job_name: 'kubernetes-pods' + honor_labels: true + kubernetes_sd_configs: + - role: pod + relabel_configs: + # select only those pods that have the "prometheus.io/scrape: true" annotation + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + # set metrics_path (default is /metrics) to the metrics path specified in "prometheus.io/path: " annotation. + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # set the scraping port to the port specified in "prometheus.io/port: " annotation and set address accordingly. + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + + #-------------- configuration to collect metrics from service endpoints ----------------------- + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # select only those endpoints whose service has the "prometheus.io/scrape: true" annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + # set the metrics_path to the path specified in "prometheus.io/path: " annotation. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # set the scraping port to the port specified in "prometheus.io/port: " annotation and set address accordingly. + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + + #---------------- configuration to collect metrics from kubernetes apiserver ------------------------- + - job_name: 'kubernetes-apiservers' + honor_labels: true + kubernetes_sd_configs: + - role: endpoints + # the kubernetes apiserver serves metrics on TLS-secured endpoints,
so we have to use the "https" scheme + scheme: https + # we have to provide a certificate to establish a TLS-secured connection + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # bearer_token_file is required for authorizing the prometheus server to the kubernetes apiserver + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + #--------------- configuration to collect metrics from nodes ----------------------- + - job_name: 'kubernetes-nodes' + honor_labels: true + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics \ No newline at end of file diff --git a/helm/monitoring-stack/charts/prometheus/templates/deployment.yaml b/helm/monitoring-stack/charts/prometheus/templates/deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..31333a9c03efbe1cea43559191b0ee04c51e0cda --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "prometheus.fullname" . }} + labels: + {{- include "prometheus.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + strategy: + type: Recreate + {{- end }} + selector: + matchLabels: + {{- include "prometheus.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "prometheus.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + args: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path={{.Values.tsdb.path }}" + - "--storage.tsdb.retention.time={{.Values.tsdb.retentionTime }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + tcpSocket: + port: {{ .Values.service.port }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: prometheus-config + mountPath: /etc/prometheus/ + {{- if .Values.persistence.enable | default false }} + - name: prometheus-storage-volume + mountPath: /prometheus/ + {{ else }} + - name: prometheus-storage + mountPath: /prometheus/ + {{- end }} + volumes: + - name: prometheus-config + configMap: + defaultMode: 420 + name: prometheus-config + {{- if .Values.persistence.enable | default false }} + - name: prometheus-storage-volume + persistentVolumeClaim: + claimName: prometheus-pvc + {{ else }} + - name: prometheus-storage + emptyDir: {} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/monitoring-stack/charts/prometheus/templates/hpa.yaml b/helm/monitoring-stack/charts/prometheus/templates/hpa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26d07f4929f91aaf8f275e086cf929b1ee4df0e5 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "prometheus.fullname" . }} + labels: + {{- include "prometheus.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "prometheus.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/prometheus/templates/ingress.yaml b/helm/monitoring-stack/charts/prometheus/templates/ingress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1100cf9568aa3a58b6e3fe0aceb16b644264850a --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/ingress.yaml @@ -0,0 +1,99 @@ +{{- if .Values.ingress.enabled -}} +{{- if eq .Values.ingress.environment "edge" }} +{{- $fullName := include "prometheus.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "prometheus.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{- if .Values.ingress.enabled }} +{{- if or (eq .Values.ingress.environment "region") (eq .Values.ingress.environment "wavelength") }} +{{- $fullName := include "prometheus.fullname" . -}} +{{- $svcPort := .Values.service.port -}} + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName | trunc 30 }} + namespace: {{ .Release.Namespace }} + annotations: + alb.ingress.kubernetes.io/load-balancer-name: {{ $fullName | trunc 30 }} + alb.ingress.kubernetes.io/target-type: instance + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/tags: tactile5g/digital-twins={{ .Release.Namespace }} +spec: + ingressClassName: alb + defaultBackend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml b/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d9c2dbeb410e3123fee320347204800bfa51d57c --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/pvc.yaml @@ -0,0 +1,14 @@ +{{- if .Values.persistence.enable | default false }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: prometheus-pvc + labels: + {{- include "prometheus.labels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.persistence.storage }} +{{- end }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/prometheus/templates/service.yaml b/helm/monitoring-stack/charts/prometheus/templates/service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eb05a25865c74149429790d0eae652d6f48e9ef4 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "prometheus.fullname" . }} + labels: + {{- include "prometheus.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "prometheus.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/prometheus/templates/serviceaccount.yaml b/helm/monitoring-stack/charts/prometheus/templates/serviceaccount.yaml new file mode 100644 index 0000000000000000000000000000000000000000..06fb6f12777cad26277b3069572119d789b59380 --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "prometheus.serviceAccountName" . }} + labels: + {{- include "prometheus.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/prometheus/templates/tests/test-connection.yaml b/helm/monitoring-stack/charts/prometheus/templates/tests/test-connection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b534e11ac56bb8fc9efbbaf55e21a576dd23510e --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "prometheus.fullname" . }}-test-connection" + labels: + {{- include "prometheus.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['prometheus:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/monitoring-stack/charts/prometheus/values.yaml b/helm/monitoring-stack/charts/prometheus/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1083bd81fc3b5e4089640a0dade73ea413b35acc --- /dev/null +++ b/helm/monitoring-stack/charts/prometheus/values.yaml @@ -0,0 +1,92 @@ +# Default values for prometheus. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: "prom/prometheus" + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +tsdb: + path: /prometheus/ + retentionTime: 5d + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
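+  # Note: templates/cluster-role.yaml also creates a ServiceAccount named "prometheus" and binds it to the scraping ClusterRole; the parent monitoring-stack values set create: false so this chart does not render a second ServiceAccount with the same name.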
+ # If not set and create is true, a name is generated using the fullname template + name: "prometheus" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +persistence: + enable: false + storage: 10Gi + +service: + type: NodePort + port: 9090 + +ingress: + enabled: false + className: "nginx" + # edge, region, wavelength + environment: "edge" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: prometheus-dt.tactile5g.int + paths: + - path: / + pathType: Prefix + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/helm/monitoring-stack/charts/skooner/.helmignore b/helm/monitoring-stack/charts/skooner/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helm/monitoring-stack/charts/skooner/Chart.yaml b/helm/monitoring-stack/charts/skooner/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a50a83a44e39bce62c3dd66c5fc9921efba886bc --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: skooner +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. 
+# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/helm/monitoring-stack/charts/skooner/README.md b/helm/monitoring-stack/charts/skooner/README.md new file mode 100644 index 0000000000000000000000000000000000000000..944cee3d6a3770c4a07768639cb9cc3081e9ce83 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/README.md @@ -0,0 +1,44 @@ +# skooner + +![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square) + +A Helm chart for Kubernetes + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `100` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"Always"` | | +| image.repository | string | `"ghcr.io/skooner-k8s/skooner"` | | +| image.tag | string | `"stable"` | | +| imagePullSecrets | list | `[]` | | +| ingress.annotations | object | `{}` | | +| ingress.className | string | `"alb"` | | +| ingress.enabled | bool | `true` | | +| ingress.environment | string | `"region"` | | +| ingress.hosts[0].host | string | `"skooner.tactile5g.com"` | | +| ingress.hosts[0].paths[0].path | string | `"/"` | | +| ingress.hosts[0].paths[0].pathType | string | `"Prefix"` | | +| ingress.tls | list | `[]` | | +| nameOverride | string | `""` | | +| nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| securityContext | object | `{}` | | +| service.port | int | `4654` | | +| service.type | string | `"NodePort"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `"skooner-sa"` | | +| tolerations | list | `[]` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/helm/monitoring-stack/charts/skooner/templates/NOTES.txt b/helm/monitoring-stack/charts/skooner/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..038d8bc76021a8fc947959bbdb3a2c795985e192 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "skooner.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "skooner.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "skooner.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "skooner.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/helm/monitoring-stack/charts/skooner/templates/_helpers.tpl b/helm/monitoring-stack/charts/skooner/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..06244a038ddda12004e9026a7c46ec318bcad37d --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "skooner.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "skooner.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "skooner.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "skooner.labels" -}} +helm.sh/chart: {{ include "skooner.chart" . }} +{{ include "skooner.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "skooner.selectorLabels" -}} +app.kubernetes.io/name: {{ include "skooner.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: skooner +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "skooner.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "skooner.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/skooner/templates/cluster-role.yaml b/helm/monitoring-stack/charts/skooner/templates/cluster-role.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0ebfccd5baa83760ad93643efe9d403dd08417b --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/cluster-role.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "skooner.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: {{ include "skooner.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/helm/monitoring-stack/charts/skooner/templates/deployment.yaml b/helm/monitoring-stack/charts/skooner/templates/deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91b119afce2ecf422f2f0bb190080205eb552fd9 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/deployment.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "skooner.fullname" . }} + labels: + {{- include "skooner.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "skooner.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "skooner.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "skooner.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 30 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/helm/monitoring-stack/charts/skooner/templates/hpa.yaml b/helm/monitoring-stack/charts/skooner/templates/hpa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52320850503397fce35d68e40519c5e9e279ec45 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "skooner.fullname" . }} + labels: + {{- include "skooner.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "skooner.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/skooner/templates/ingress.yaml b/helm/monitoring-stack/charts/skooner/templates/ingress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c8849cad06a547c96c99e7bd6f6a10fc6da935f --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/ingress.yaml @@ -0,0 +1,99 @@ +{{- if .Values.ingress.enabled -}} +{{- if eq .Values.ingress.environment "edge" }} +{{- $fullName := include "skooner.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName | trunc 30 }} + labels: + {{- include "skooner.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{- if .Values.ingress.enabled }} +{{- if or (eq .Values.ingress.environment "region") (eq .Values.ingress.environment "wavelength") }} +{{- $fullName := include "skooner.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName | trunc 30 }} + namespace: {{ .Release.Namespace }} + annotations: + alb.ingress.kubernetes.io/load-balancer-name: {{ $fullName | trunc 30 }} + alb.ingress.kubernetes.io/target-type: instance + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/tags: tactile5g/digital-twins={{ .Release.Namespace }} +spec: + ingressClassName: alb + defaultBackend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm/monitoring-stack/charts/skooner/templates/service.yaml b/helm/monitoring-stack/charts/skooner/templates/service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d2e1e97d3d2ae340ee43fc36d1ad81eb08049e0 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "skooner.fullname" . }} + labels: + {{- include "skooner.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "skooner.selectorLabels" . | nindent 4 }} diff --git a/helm/monitoring-stack/charts/skooner/templates/serviceaccount.yaml b/helm/monitoring-stack/charts/skooner/templates/serviceaccount.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee040fd662a68ba348e433d69486d5960dea3a7f --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "skooner.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "skooner.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helm/monitoring-stack/charts/skooner/templates/tests/test-connection.yaml b/helm/monitoring-stack/charts/skooner/templates/tests/test-connection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dccfc6a38b215097665663572fbbdd9bccf12edc --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "skooner.fullname" . }}-test-connection" + labels: + {{- include "skooner.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['skooner:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/helm/monitoring-stack/charts/skooner/values.yaml b/helm/monitoring-stack/charts/skooner/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad511bf2892905f586a1a446ce118455167d0cc6 --- /dev/null +++ b/helm/monitoring-stack/charts/skooner/values.yaml @@ -0,0 +1,85 @@ +# Default values for skooner. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: ghcr.io/skooner-k8s/skooner + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "stable" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "skooner-sa" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: NodePort + port: 4654 + +ingress: + enabled: true + className: "nginx" + # edge, region, wavelength + environment: "edge" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: skooner.tactile5g.com + paths: + - path: / + pathType: Prefix + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: + kubernetes.io/os: linux + +tolerations: [] + +affinity: {} diff --git a/helm/monitoring-stack/values.yaml b/helm/monitoring-stack/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eac15c4e3e7088b066fdfc41f0561bb46c6cb173 --- /dev/null +++ b/helm/monitoring-stack/values.yaml @@ -0,0 +1,49 @@ + +grafana: + enabled: true + env: + prometheusUrl: http://monitoring-prometheus.monitoring.svc.cluster.local:9090 + ingress: + enable: true + hosts: + - host: grafana-dt.test.int + paths: + - path: / + pathType: Prefix + service: + type: ClusterIP + +prometheus: + enabled: true + ingress: + enabled: true + hosts: + - host: prometheus-dt.test.int + paths: + - path: / + pathType: Prefix + serviceAccount: + create: false + service: + type: ClusterIP + +skooner: + enabled: true + ingress: + enabled: true + className: nginx + hosts: + - host: skooner-dt.test.int + paths: + - path: / + pathType: Prefix + service: + type: ClusterIP + +metrics-server: + enabled: false + apiService: + create: true + extraArgs: + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP diff --git a/helm/scripts/create_remote_users.sh b/helm/scripts/create_remote_users.sh new file mode 100755 index 0000000000000000000000000000000000000000..909fffdd36ae9e45f614cc668220395a96cdbafd --- /dev/null +++ b/helm/scripts/create_remote_users.sh @@ -0,0 +1,136 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +# User to create +TOTAL_USERS=1 +USERNAME_PREFIX= +USER_PASSWORD= + +help() { + echo "Usage: $1 " + echo " -u : User prefix to use" + echo " -p : Password to set for user" + echo " -t : Total user to create (default 1)" + echo " -h : show this help" + exit 1 +} + +# Read params +while getopts ":u:p:t:h" opt; do + case $opt in + u) + USERNAME_PREFIX="$OPTARG" + ;; + p) + USER_PASSWORD=$OPTARG + ;; + t) + TOTAL_USERS=$OPTARG + ;; + h) + help + ;; + \?) 
+ echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." >&2 + help + ;; + esac +done + +if [[ "$USERNAME_PREFIX" == "" ]] +then + echo "USERNAME_PREFIX must be set with option -u" + help + exit -1 +fi + +if [[ "$USER_PASSWORD" == "" ]] +then + echo "USER_PASSWORD must be set with option -p" + help + exit -1 +fi + +# Other Stuff +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE_VERSION=1.0 + +TEST_FOLDER=$CAPIF_BASE_DIR/tests +RESULT_FOLDER=$CAPIF_BASE_DIR/results +ROBOT_DOCKER_FILE_FOLDER=$CAPIF_BASE_DIR/tools/robot + +# nginx Hostname and http port (80 by default) to reach for tests +CAPIF_REGISTER=$REGISTER_HOSTNAME +CAPIF_REGISTER_PORT=443 +CAPIF_HTTPS_PORT=443 + +# VAULT access configuration +CAPIF_VAULT=$VAULT_HOSTNAME +CAPIF_VAULT_PORT=80 +CAPIF_VAULT_TOKEN=$VAULT_ACCESS_TOKEN + +# Mock Server +MOCK_SERVER_URL=http://mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN:80 +NOTIFICATION_DESTINATION_URL=http://mock-server.$CAPIF_NAMESPACE.svc.cluster.local:9090 + +# Show variables +echo "CAPIF_HOSTNAME = $CAPIF_HOSTNAME" +echo "CAPIF_REGISTER = $CAPIF_REGISTER" +echo "CAPIF_HTTP_PORT = $CAPIF_HTTP_PORT" +echo "CAPIF_HTTPS_PORT = $CAPIF_HTTPS_PORT" +echo "CAPIF_VAULT = $CAPIF_VAULT" +echo "CAPIF_VAULT_PORT = $CAPIF_VAULT_PORT" +echo "CAPIF_VAULT_TOKEN = $CAPIF_VAULT_TOKEN" +echo "TOTAL_USERS=$TOTAL_USERS" +echo "USERNAME_PREFIX=$USERNAME_PREFIX" +echo "USER_PASSWORD=$USER_PASSWORD" +echo "MOCK_SERVER_URL=$MOCK_SERVER_URL" +echo "NOTIFICATION_DESTINATION_URL=$NOTIFICATION_DESTINATION_URL" + +docker >/dev/null 2>/dev/null +if [[ $? -ne 0 ]] +then + echo "Docker maybe is not installed. Please check if docker CLI is present." + exit -1 +fi + +docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' +if [[ $? -ne 0 ]] +then + read -p "Robot image is not present. To continue, Do you want to build it? (y/n)" build_robot_image + if [[ $build_robot_image == "y" ]] + then + echo "Building Robot docker image." + cd $ROBOT_DOCKER_FILE_FOLDER + docker build --no-cache -t $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION . 
+ cd $CAPIF_BASE_DIR + else + exit -2 + fi +fi + +cd $CAPIF_BASE_DIR + +mkdir -p $RESULT_FOLDER + +docker run -ti --rm --network="host" \ + -v $TEST_FOLDER:/opt/robot-tests/tests \ + -v $RESULT_FOLDER:/opt/robot-tests/results ${DOCKER_ROBOT_IMAGE}:${DOCKER_ROBOT_IMAGE_VERSION} \ + --variable CAPIF_HOSTNAME:$CAPIF_HOSTNAME \ + --variable CAPIF_HTTP_PORT:$CAPIF_HTTP_PORT \ + --variable CAPIF_HTTPS_PORT:$CAPIF_HTTPS_PORT \ + --variable CAPIF_REGISTER:$CAPIF_REGISTER \ + --variable CAPIF_REGISTER_PORT:$CAPIF_REGISTER_PORT \ + --variable CAPIF_VAULT:$CAPIF_VAULT \ + --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \ + --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \ + --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \ + --variable MOCK_SERVER_URL:$MOCK_SERVER_URL \ + --variable TOTAL_USERS:$TOTAL_USERS \ + --variable USERNAME_PREFIX:$USERNAME_PREFIX \ + --variable USER_PASSWORD:$USER_PASSWORD \ + --include create-users diff --git a/helm/scripts/get_ingress.sh b/helm/scripts/get_ingress.sh new file mode 100755 index 0000000000000000000000000000000000000000..eaed4e9b9c8e4c19be6df834da57b02e825e09d4 --- /dev/null +++ b/helm/scripts/get_ingress.sh @@ -0,0 +1,55 @@ +#!/bin/bash +IP="" +NAMESPACE="" +source $(dirname "$(readlink -f "$0")")/variables.sh + +help() { + echo "Usage: $1 " + echo " -i : IP to use" + echo " -n : Namespace to get ingress information" + echo " -k : Kubeconfig to be used" + echo " -h : show this help" + exit 1 +} +# Read params +while getopts ":i:n:k:h" opt; do + case $opt in + i) + IP="$OPTARG" + ;; + n) + NAMESPACE="$OPTARG" + ;; + k) + KUBECONFIG="$OPTARG" + if [ -z "$KUBECONFIG" ]; then + echo "The variable KUBECONFIG is empty. Using default k8s environment..." + else + KUBECONFIG="--kubeconfig $KUBECONFIG" + echo "The variable KUBECONFIG is not empty. Its value is: $KUBECONFIG" + fi + ;; + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." 
>&2 + help + ;; + esac +done + +if [[ -n "$NAMESPACE" && -n "$IP" ]] +then + echo "IP: $IP and namespace: $NAMESPACE" +else + echo "IP ($IP) and NAMESPACE ($NAMESPACE) must be set" + exit -1 +fi + + +kubectl $KUBECONFIG -n $NAMESPACE get ing|grep -v NAME|awk "{print \"$IP \"\$3}" diff --git a/helm/scripts/install_capif.sh b/helm/scripts/install_capif.sh new file mode 100755 index 0000000000000000000000000000000000000000..bbf4b425e0909bee074a4006f65cfc60a092b490 --- /dev/null +++ b/helm/scripts/install_capif.sh @@ -0,0 +1,129 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +### download dependencies +helm $KUBECONFIG dependency build $HELM_DIR/capif/ + +### check ingress_ip.oneke and get ip from ingress-nginx-controller +kubectl $KUBECONFIG get svc -A | grep ingress-nginx-controller + +helm $KUBECONFIG upgrade --install -n $CAPIF_NAMESPACE $CAPIF_NAME_VERSION_CHART $HELM_DIR/capif/ \ +--set grafana.enabled=true \ +--set grafana.ingress.enabled=true \ +--set grafana.ingress.hosts[0].host=ocf-mon-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN \ +--set grafana.ingress.hosts[0].paths[0].path="/" \ +--set grafana.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set grafana.env.prometheusUrl=$PROMETHEUS_URL \ +--set grafana.env.tempoUrl="http://$CAPIF_NAME_VERSION_CHART-tempo:3100" \ +--set fluentbit.enabled=true \ +--set loki.enabled=true \ +--set tempo.tempo.metricsGenerator.remoteWriteUrl=$PROMETHEUS_URL/api/v1/write \ +--set otelcollector.enabled=true \ +--set otelcollector.configMap.tempoEndpoint=$CAPIF_NAME_VERSION_CHART-tempo:4317 \ +--set ocf-access-control-policy.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-access-control-policy-api \ +--set ocf-access-control-policy.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-access-control-policy.image.env.capifHostname=$CAPIF_HOSTNAME \ +--set ocf-access-control-policy.monitoring="true" \ +--set ocf-access-control-policy.env.logLevel="DEBUG" \ +--set ocf-api-invocation-logs.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-logging-api-invocation-api \ +--set ocf-api-invocation-logs.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-api-invocation-logs.env.monitoring="true" \ +--set ocf-api-invocation-logs.env.capifHostname=$CAPIF_HOSTNAME \ +--set ocf-api-invocation-logs.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set ocf-api-invocation-logs.env.vaultPort=$VAULT_PORT \ +--set ocf-api-invocation-logs.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set ocf-api-invocation-logs.env.logLevel="DEBUG" \ +--set ocf-api-invoker-management.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-api-invoker-management-api \ +--set ocf-api-invoker-management.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-api-invoker-management.env.monitoring="true" \ +--set ocf-api-invoker-management.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set ocf-api-invoker-management.env.vaultPort=$VAULT_PORT \ +--set ocf-api-invoker-management.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set ocf-api-invoker-management.env.logLevel="DEBUG" \ +--set ocf-api-provider-management.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-api-provider-management-api \ +--set ocf-api-provider-management.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-api-provider-management.env.monitoring="true" \ +--set ocf-api-provider-management.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set ocf-api-provider-management.env.logLevel="DEBUG" \ +--set ocf-api-provider-management.env.vaultPort=$VAULT_PORT \ +--set ocf-api-provider-management.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set 
ocf-events.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-events-api \ +--set ocf-events.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-events.env.monitoring="true" \ +--set ocf-events.env.logLevel="DEBUG" \ +--set ocf-routing-info.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-routing-info-api \ +--set ocf-routing-info.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-routing-info.env.monitoring="true" \ +--set ocf-routing-info.env.logLevel="DEBUG" \ +--set ocf-security.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-security-api \ +--set ocf-security.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-security.env.monitoring="true" \ +--set ocf-security.env.capifHostname=$CAPIF_HOSTNAME \ +--set ocf-security.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set ocf-security.env.vaultPort=$VAULT_PORT \ +--set ocf-security.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set ocf-security.env.logLevel="DEBUG" \ +--set ocf-register.image.repository=$CAPIF_DOCKER_REGISTRY/register \ +--set ocf-register.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-register.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set ocf-register.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set ocf-register.env.vaultPort=$VAULT_PORT \ +--set ocf-register.env.mongoHost=mongo-register \ +--set ocf-register.env.mongoPort=27017 \ +--set ocf-register.env.capifHostname=$CAPIF_HOSTNAME \ +--set ocf-register.ingress.enabled=true \ +--set ocf-register.ingress.hosts[0].host=$REGISTER_HOSTNAME \ +--set ocf-register.ingress.hosts[0].paths[0].path="/" \ +--set ocf-register.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set ocf-register.env.logLevel="DEBUG" \ +--set ocf-register.extraConfigPod.hostAliases[0].hostnames[0]=$CAPIF_HOSTNAME \ +--set ocf-register.extraConfigPod.hostAliases[0].ip=$K8S_IP \ +--set ocf-auditing-api-logs.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-auditing-api \ +--set ocf-auditing-api-logs.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-auditing-api-logs.env.monitoring="true" \ +--set ocf-auditing-api-logs.env.logLevel="DEBUG" \ +--set ocf-publish-service-api.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-publish-service-api \ +--set ocf-publish-service-api.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-publish-service-api.env.monitoring="true" \ +--set ocf-publish-service-api.env.logLevel="DEBUG" \ +--set ocf-discover-service-api.image.repository=$CAPIF_DOCKER_REGISTRY/ocf-discover-service-api \ +--set ocf-discover-service-api.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-discover-service-api.env.monitoring="true" \ +--set ocf-discover-service-api.env.logLevel="DEBUG" \ +--set nginx.image.repository=$CAPIF_DOCKER_REGISTRY/nginx \ +--set nginx.image.tag=$CAPIF_IMAGE_TAG \ +--set nginx.env.capifHostname=$CAPIF_HOSTNAME \ +--set nginx.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set nginx.env.vaultPort=$VAULT_PORT \ +--set nginx.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set nginx.ingress.enabled=true \ +--set nginx.ingress.hosts[0].host=$CAPIF_HOSTNAME \ +--set nginx.ingress.hosts[0].paths[0].path="/" \ +--set nginx.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set nginx.env.logLevel="debug" \ +--set ocf-helper.image.repository=$CAPIF_DOCKER_REGISTRY/helper \ +--set ocf-helper.image.tag=$CAPIF_IMAGE_TAG \ +--set ocf-helper.env.vaultHostname=$VAULT_INTERNAL_HOSTNAME \ +--set ocf-helper.env.vaultPort=$VAULT_PORT \ +--set ocf-helper.env.vaultAccessToken=$VAULT_ACCESS_TOKEN \ +--set ocf-helper.env.capifHostname=$CAPIF_HOSTNAME \ +--set ocf-helper.env.logLevel="DEBUG" \ +--set mock-server.enabled=true \ +--set mock-server.image.repository=$CAPIF_DOCKER_REGISTRY/mock-server 
\ +--set mock-server.image.tag=$CAPIF_IMAGE_TAG \ +--set mock-server.ingress.enabled=true \ +--set mock-server.ingress.hosts[0].host=mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN \ +--set mock-server.ingress.hosts[0].paths[0].path="/" \ +--set mock-server.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set mock-server.env.logLevel="DEBUG" \ +--set mongo-register-express.enabled=true \ +--set mongo-register-express.ingress.enabled=true \ +--set mongo-register-express.ingress.hosts[0].host="mongo-express-register-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ +--set mongo-register-express.ingress.hosts[0].paths[0].path="/" \ +--set mongo-register-express.ingress.hosts[0].paths[0].pathType="Prefix" \ +--set mongo-express.enabled=true \ +--set mongo-express.ingress.enabled=true \ +--set mongo-express.ingress.hosts[0].host="mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN" \ +--set mongo-express.ingress.hosts[0].paths[0].path="/" \ +--set mongo-express.ingress.hosts[0].paths[0].pathType="Prefix" \ +--wait --timeout=10m --create-namespace --atomic diff --git a/helm/scripts/install_monitoring.sh b/helm/scripts/install_monitoring.sh new file mode 100755 index 0000000000000000000000000000000000000000..f881f0a1a971631f0d0bb210a2b3b3b2772c55da --- /dev/null +++ b/helm/scripts/install_monitoring.sh @@ -0,0 +1,17 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +helm repo add bitnami https://charts.bitnami.com/bitnami + +helm $KUBECONFIG dependency build $HELM_DIR/monitoring-stack/ + +helm $KUBECONFIG upgrade --install -n $MONITORING_NAMESPACE $MONITORING_SERVICE_NAME $HELM_DIR/monitoring-stack/ \ +--set grafana.enabled=false \ +--set grafana.env.prometheusUrl=$PROMETHEUS_URL \ +--set prometheus.enabled=true \ +--set prometheus.ingress.enabled=true \ +--set prometheus.ingress.hosts[0].host=$PROMETHEUS_HOSTNAME \ +--set prometheus.ingress.hosts[0].paths[0].path="/" \ +--set prometheus.ingress.hosts[0].paths[0].pathType="Prefix" \ +--wait --timeout=10m --create-namespace --atomic + diff --git a/helm/scripts/install_vault.sh b/helm/scripts/install_vault.sh new file mode 100755 index 0000000000000000000000000000000000000000..c06e560a9609bfc6a5333cbe3d1008cbe710bf20 --- /dev/null +++ b/helm/scripts/install_vault.sh @@ -0,0 +1,154 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +# Function to get the service status +get_service_status() { + kubectl $KUBECONFIG get pods -n "$VAULT_NAMESPACE" -l $LABEL_TO_CHECK="$VAULT_SERVICE_NAME" -o jsonpath='{.items[*].status.phase}' +} + +# Function to get the number of ready replicas +get_ready_replicas() { + kubectl $KUBECONFIG get pods -n "$VAULT_NAMESPACE" -l $LABEL_TO_CHECK="$VAULT_SERVICE_NAME" -o jsonpath='{.items[*].status.containerStatuses[0].ready}' +} + +# Function to get the number of ready replicas +get_started_replicas() { + kubectl $KUBECONFIG get pods -n "$VAULT_NAMESPACE" -l $LABEL_TO_CHECK="$VAULT_SERVICE_NAME" -o jsonpath='{.items[*].status.containerStatuses[0].started}' +} + +get_succeeded_job_status() { + kubectl $KUBECONFIG get jobs -n "$VAULT_NAMESPACE" -o jsonpath='{.items[*].status.succeeded}' +} + +get_failed_job_status() { + kubectl $KUBECONFIG get jobs -n "$VAULT_NAMESPACE" -o jsonpath='{.items[*].status.failed}' +} + +get_completion_job_status() { + kubectl $KUBECONFIG get jobs -n "$VAULT_NAMESPACE" -o jsonpath='{.items[*].status.conditions[0].status}' +} + +get_completed_type_job_status(){ + kubectl $KUBECONFIG get jobs -n "$VAULT_NAMESPACE" -o jsonpath='{.items[*].status.conditions[0].type}' +} + 
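+# Overall flow of this script: add the hashicorp Helm repo and install a
+# standalone Vault with an nginx ingress, wait until the pod is Running but
+# not yet Ready (Vault starts sealed), initialize it with a single key share,
+# unseal it and wait for 1/1 Ready, then apply the vault-pki job from
+# $HELM_DIR/vault-job/ and wait for it to complete.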
+helm $KUBECONFIG repo add hashicorp https://helm.releases.hashicorp.com
+
+helm $KUBECONFIG upgrade --install vault hashicorp/vault -n $VAULT_NAMESPACE --set server.ingress.enabled=true \
+--set server.ingress.hosts[0].host="$VAULT_HOSTNAME" \
+--set server.ingress.ingressClassName=nginx \
+--set server.standalone.enabled=true --create-namespace
+
+# Loop to wait until the service is in "Running" state and has 0/1 ready replicas
+while true; do
+  SERVICE_STATUS=$(get_service_status)
+  READY_REPLICAS=$(get_ready_replicas)
+  STARTED_REPLICAS=$(get_started_replicas)
+
+  echo "Service status: $SERVICE_STATUS"
+  echo "Ready replicas: $READY_REPLICAS"
+  echo "Started Replicas: $STARTED_REPLICAS"
+
+  if [ "$SERVICE_STATUS" == "Running" ] && [ "$READY_REPLICAS" == "false" ] && [ "$STARTED_REPLICAS" == "true" ]; then
+    echo "The service $VAULT_SERVICE_NAME is in RUNNING state and has 0/1 ready replicas."
+    break
+  else
+    echo "Waiting for the service $VAULT_SERVICE_NAME to be in RUNNING state and have 0/1 ready replicas..."
+    sleep 5
+  fi
+done
+
+echo "The service $VAULT_SERVICE_NAME is now in the desired state."
+
+# Init vault
+echo ""
+echo "Init vault"
+kubectl $KUBECONFIG exec -ti vault-0 -n $VAULT_NAMESPACE -- vault operator init -key-shares=1 -key-threshold=1 > $VAULT_FILE
+
+# Remove control characters
+cat $VAULT_FILE | sed -r 's/\x1B\[[0-9;]*[JKmsu]//g' | sed -e 's/[^[:print:]\t\n]//g' > $VAULT_FILE.tmp
+mv $VAULT_FILE.tmp $VAULT_FILE
+
+# get UNSEAL Key and TOKEN
+UNSEAL_KEY=$(awk '/Unseal Key 1/{ print $4 }' $VAULT_FILE)
+VAULT_TOKEN=$(awk '/Initial Root Token/{ print $4 }' $VAULT_FILE)
+
+echo "UNSEAL KEY: $UNSEAL_KEY"
+echo "VAULT TOKEN: $VAULT_TOKEN"
+
+kubectl $KUBECONFIG exec -ti vault-0 -n $VAULT_NAMESPACE -- vault operator unseal $UNSEAL_KEY
+
+# Loop to wait until the service is in "Running" state and has 1/1 ready replicas
+while true; do
+  SERVICE_STATUS=$(get_service_status)
+  READY_REPLICAS=$(get_ready_replicas)
+  STARTED_REPLICAS=$(get_started_replicas)
+
+  echo "Service status: $SERVICE_STATUS"
+  echo "Ready replicas: $READY_REPLICAS"
+  echo "Started Replicas: $STARTED_REPLICAS"
+
+  if [ "$SERVICE_STATUS" == "Running" ] && [ "$READY_REPLICAS" == "true" ] && [ "$STARTED_REPLICAS" == "true" ]; then
+    echo "The service $VAULT_SERVICE_NAME is in RUNNING state and has 1/1 ready replicas."
+    break
+  else
+    echo "Waiting for the service $VAULT_SERVICE_NAME to be in RUNNING state and have 1/1 ready replicas..."
+    sleep 5
+  fi
+done
+
+sed -i "s/namespace:.*/namespace: $VAULT_NAMESPACE/g" $HELM_DIR/vault-job/vault-job.yaml
+sed -i "s/VAULT_TOKEN=.*/VAULT_TOKEN=$VAULT_TOKEN/g" $HELM_DIR/vault-job/vault-job.yaml
+sed -i "s/DOMAIN1=.*/DOMAIN1=$DOMAIN1/g" $HELM_DIR/vault-job/vault-job.yaml
+sed -i "s/DOMAIN2=.*/DOMAIN2=$DOMAIN2/g" $HELM_DIR/vault-job/vault-job.yaml
+sed -i "s/DOMAIN3=.*/DOMAIN3=$DOMAIN3/g" $HELM_DIR/vault-job/vault-job.yaml
+
+kubectl $KUBECONFIG delete job $VAULT_JOB_NAME -n $VAULT_NAMESPACE || echo "No vault job present"
+kubectl $KUBECONFIG -n $VAULT_NAMESPACE apply -f $HELM_DIR/vault-job/
+
+# Check job status
+while true; do
+  SUCCEEDED_JOB_STATUS=$(get_succeeded_job_status)
+  FAILED_JOB_STATUS=$(get_failed_job_status)
+  COMPLETION_JOB_STATUS=$(get_completion_job_status)
+  COMPLETED_TYPE_JOB_STATUS=$(get_completed_type_job_status)
+
+  echo "SUCCEEDED_JOB_STATUS: $SUCCEEDED_JOB_STATUS"
+  echo "FAILED_JOB_STATUS: $FAILED_JOB_STATUS"
+  echo "COMPLETION_JOB_STATUS: $COMPLETION_JOB_STATUS"
+  echo "COMPLETED_TYPE_JOB_STATUS: $COMPLETED_TYPE_JOB_STATUS"
+
+  if [ "$FAILED_JOB_STATUS" != "" ]; then
+    echo "The vault job failed, check variables."
+    exit -1
+  elif [ "$SUCCEEDED_JOB_STATUS" != "" ] && (( SUCCEEDED_JOB_STATUS > 0 )) && [ "$COMPLETED_TYPE_JOB_STATUS" == "Complete" ] && [ "$COMPLETION_JOB_STATUS" == "True" ]; then
+    echo "The vault job succeeded."
+    break
+  else
+    echo "Waiting for the $VAULT_JOB_NAME job to complete..."
+    sleep 5
+  fi
+done
+
+echo "Job Success"
+# Loop to wait until the service is in "Running" state and has 1/1 ready replicas
+
+while true; do
+  SERVICE_STATUS=$(get_service_status)
+  READY_REPLICAS=$(get_ready_replicas)
+  STARTED_REPLICAS=$(get_started_replicas)
+
+  echo "Service status: $SERVICE_STATUS"
+  echo "Ready replicas: $READY_REPLICAS"
+  echo "Started Replicas: $STARTED_REPLICAS"
+
+  if [ "$SERVICE_STATUS" == "Running" ] && [ "$READY_REPLICAS" == "true" ] && [ "$STARTED_REPLICAS" == "true" ]; then
+    echo "The service $VAULT_SERVICE_NAME is in RUNNING state and has 1/1 ready replicas."
+    break
+  else
+    echo "Waiting for the service $VAULT_SERVICE_NAME to be in RUNNING state and have 1/1 ready replicas..."
+    sleep 5
+  fi
+done
+
+echo "The service $VAULT_SERVICE_NAME is successfully deployed."
diff --git a/helm/scripts/populate_create_remote_dummy_users.sh b/helm/scripts/populate_create_remote_dummy_users.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d2e137dbf5f15e3135b129699d0487be8888598f
--- /dev/null
+++ b/helm/scripts/populate_create_remote_dummy_users.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+source $(dirname "$(readlink -f "$0")")/variables.sh
+
+# Populate variables
+TOTAL_INVOKERS=10
+TOTAL_PROVIDERS=10
+
+help() {
+  echo "Usage: $1 "
+  echo " -p : Total providers to create (default 10)"
+  echo " -i : Total invokers to create (default 10)"
+  echo " -h : show this help"
+  exit 1
+}
+
+# Read params
+while getopts ":p:i:h" opt; do
+  case $opt in
+    p)
+      TOTAL_PROVIDERS=$OPTARG
+      ;;
+    i)
+      TOTAL_INVOKERS=$OPTARG
+      ;;
+    h)
+      help
+      ;;
+    \?)
+      echo "Not valid option: -$OPTARG" >&2
+      help
+      ;;
+    :)
+      echo "The -$OPTARG option requires an argument."
>&2 + help + ;; + esac +done + +# Other Stuff +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE_VERSION=1.0 + +TEST_FOLDER=$CAPIF_BASE_DIR/tests +RESULT_FOLDER=$CAPIF_BASE_DIR/results +ROBOT_DOCKER_FILE_FOLDER=$CAPIF_BASE_DIR/tools/robot + +# nginx Hostname and http port (80 by default) to reach for tests +CAPIF_REGISTER=$REGISTER_HOSTNAME +CAPIF_REGISTER_PORT=443 +CAPIF_HTTPS_PORT=443 + +# VAULT access configuration +CAPIF_VAULT=$VAULT_HOSTNAME +CAPIF_VAULT_PORT=80 +CAPIF_VAULT_TOKEN=$VAULT_ACCESS_TOKEN + +# Mock Server +MOCK_SERVER_URL=http://mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN:80 +NOTIFICATION_DESTINATION_URL=http://mock-server.$CAPIF_NAMESPACE.svc.cluster.local:9090 + + +# Show variables +echo "CAPIF_HOSTNAME = $CAPIF_HOSTNAME" +echo "CAPIF_REGISTER = $CAPIF_REGISTER" +echo "CAPIF_HTTP_PORT = $CAPIF_HTTP_PORT" +echo "CAPIF_HTTPS_PORT = $CAPIF_HTTPS_PORT" +echo "CAPIF_VAULT = $CAPIF_VAULT" +echo "CAPIF_VAULT_PORT = $CAPIF_VAULT_PORT" +echo "CAPIF_VAULT_TOKEN = $CAPIF_VAULT_TOKEN" +echo "TOTAL_USERS=$TOTAL_USERS" +echo "USERNAME_PREFIX=$USERNAME_PREFIX" +echo "USER_PASSWORD=$USER_PASSWORD" +echo "MOCK_SERVER_URL=$MOCK_SERVER_URL" +echo "NOTIFICATION_DESTINATION_URL=$NOTIFICATION_DESTINATION_URL" + +docker >/dev/null 2>/dev/null +if [[ $? -ne 0 ]] +then + echo "Docker maybe is not installed. Please check if docker CLI is present." + exit -1 +fi + +docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' +if [[ $? -ne 0 ]] +then + read -p "Robot image is not present. To continue, Do you want to build it? (y/n)" build_robot_image + if [[ $build_robot_image == "y" ]] + then + echo "Building Robot docker image." + cd $ROBOT_DOCKER_FILE_FOLDER + docker build --no-cache -t $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION . + cd $CAPIF_BASE_DIR + else + exit -2 + fi +fi + +cd $CAPIF_BASE_DIR + +mkdir -p $RESULT_FOLDER + +docker run -ti --rm --network="host" \ + -v $TEST_FOLDER:/opt/robot-tests/tests \ + -v $RESULT_FOLDER:/opt/robot-tests/results ${DOCKER_ROBOT_IMAGE}:${DOCKER_ROBOT_IMAGE_VERSION} \ + --variable CAPIF_HOSTNAME:$CAPIF_HOSTNAME \ + --variable CAPIF_HTTP_PORT:$CAPIF_HTTP_PORT \ + --variable CAPIF_HTTPS_PORT:$CAPIF_HTTPS_PORT \ + --variable CAPIF_REGISTER:$CAPIF_REGISTER \ + --variable CAPIF_REGISTER_PORT:$CAPIF_REGISTER_PORT \ + --variable CAPIF_VAULT:$CAPIF_VAULT \ + --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \ + --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \ + --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \ + --variable MOCK_SERVER_URL:$MOCK_SERVER_URL \ + --variable TOTAL_PROVIDERS:$TOTAL_PROVIDERS \ + --variable TOTAL_INVOKERS:$TOTAL_INVOKERS \ + --include populate-create diff --git a/helm/scripts/populate_remove_remote_dummy_users.sh b/helm/scripts/populate_remove_remote_dummy_users.sh new file mode 100755 index 0000000000000000000000000000000000000000..1a22319317c9a2ed343937fa66fb59cb624b2790 --- /dev/null +++ b/helm/scripts/populate_remove_remote_dummy_users.sh @@ -0,0 +1,104 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +help() { + echo "Usage: $1 " + echo "Removes all dummy users populated, stored on latest zip file on results" + echo " -h : show this help" + exit 1 +} + +# Read params +while getopts ":h" opt; do + case $opt in + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." 
>&2 + help + ;; + esac +done + +# Other Stuff +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE_VERSION=1.0 + +TEST_FOLDER=$CAPIF_BASE_DIR/tests +RESULT_FOLDER=$CAPIF_BASE_DIR/results +ROBOT_DOCKER_FILE_FOLDER=$CAPIF_BASE_DIR/tools/robot + +# nginx Hostname and http port (80 by default) to reach for tests +CAPIF_REGISTER=$REGISTER_HOSTNAME +CAPIF_REGISTER_PORT=443 +CAPIF_HTTPS_PORT=443 + +# VAULT access configuration +CAPIF_VAULT=$VAULT_HOSTNAME +CAPIF_VAULT_PORT=80 +CAPIF_VAULT_TOKEN=$VAULT_ACCESS_TOKEN + +# Mock Server +MOCK_SERVER_URL=http://mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN:80 +NOTIFICATION_DESTINATION_URL=http://mock-server.$CAPIF_NAMESPACE.svc.cluster.local:9090 + + +# Show variables +echo "CAPIF_HOSTNAME = $CAPIF_HOSTNAME" +echo "CAPIF_REGISTER = $CAPIF_REGISTER" +echo "CAPIF_HTTP_PORT = $CAPIF_HTTP_PORT" +echo "CAPIF_HTTPS_PORT = $CAPIF_HTTPS_PORT" +echo "CAPIF_VAULT = $CAPIF_VAULT" +echo "CAPIF_VAULT_PORT = $CAPIF_VAULT_PORT" +echo "CAPIF_VAULT_TOKEN = $CAPIF_VAULT_TOKEN" +echo "TOTAL_USERS=$TOTAL_USERS" +echo "USERNAME_PREFIX=$USERNAME_PREFIX" +echo "USER_PASSWORD=$USER_PASSWORD" +echo "MOCK_SERVER_URL=$MOCK_SERVER_URL" +echo "NOTIFICATION_DESTINATION_URL=$NOTIFICATION_DESTINATION_URL" + +docker >/dev/null 2>/dev/null +if [[ $? -ne 0 ]] +then + echo "Docker maybe is not installed. Please check if docker CLI is present." + exit -1 +fi + +docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' +if [[ $? -ne 0 ]] +then + read -p "Robot image is not present. To continue, Do you want to build it? (y/n)" build_robot_image + if [[ $build_robot_image == "y" ]] + then + echo "Building Robot docker image." + cd $ROBOT_DOCKER_FILE_FOLDER + docker build --no-cache -t $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION . + cd $CAPIF_BASE_DIR + else + exit -2 + fi +fi + +cd $CAPIF_BASE_DIR + +mkdir -p $RESULT_FOLDER + +docker run -ti --rm --network="host" \ + -v $TEST_FOLDER:/opt/robot-tests/tests \ + -v $RESULT_FOLDER:/opt/robot-tests/results ${DOCKER_ROBOT_IMAGE}:${DOCKER_ROBOT_IMAGE_VERSION} \ + --variable CAPIF_HOSTNAME:$CAPIF_HOSTNAME \ + --variable CAPIF_HTTP_PORT:$CAPIF_HTTP_PORT \ + --variable CAPIF_HTTPS_PORT:$CAPIF_HTTPS_PORT \ + --variable CAPIF_REGISTER:$CAPIF_REGISTER \ + --variable CAPIF_REGISTER_PORT:$CAPIF_REGISTER_PORT \ + --variable CAPIF_VAULT:$CAPIF_VAULT \ + --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \ + --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \ + --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \ + --variable MOCK_SERVER_URL:$MOCK_SERVER_URL \ + --include populate-remove diff --git a/helm/scripts/remove_remote_users.sh b/helm/scripts/remove_remote_users.sh new file mode 100755 index 0000000000000000000000000000000000000000..1891fec00de393dd13b095bf774963327cd89f21 --- /dev/null +++ b/helm/scripts/remove_remote_users.sh @@ -0,0 +1,117 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +# User to remove +USERNAME_PREFIX= + +help() { + echo "Usage: $1 " + echo " -u : User prefix to use" + echo " -h : show this help" + exit 1 +} + +# Read params +while getopts ":u:p:t:h" opt; do + case $opt in + u) + USERNAME_PREFIX="$OPTARG" + ;; + h) + help + ;; + \?) + echo "Not valid option: -$OPTARG" >&2 + help + ;; + :) + echo "The -$OPTARG option requires an argument." 
>&2 + help + ;; + esac +done + +if [[ "$USERNAME_PREFIX" == "" ]] +then + echo "USERNAME_PREFIX must be set with option -u" + help + exit -1 +fi + +# Other Stuff +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE_VERSION=1.0 + +TEST_FOLDER=$CAPIF_BASE_DIR/tests +RESULT_FOLDER=$CAPIF_BASE_DIR/results +ROBOT_DOCKER_FILE_FOLDER=$CAPIF_BASE_DIR/tools/robot + +# nginx Hostname and http port (80 by default) to reach for tests +CAPIF_REGISTER=$REGISTER_HOSTNAME +CAPIF_REGISTER_PORT=443 +CAPIF_HTTPS_PORT=443 + +# VAULT access configuration +CAPIF_VAULT=$VAULT_HOSTNAME +CAPIF_VAULT_PORT=80 +CAPIF_VAULT_TOKEN=$VAULT_ACCESS_TOKEN + +# Mock Server +MOCK_SERVER_URL=http://mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN:80 +NOTIFICATION_DESTINATION_URL=http://mock-server.$CAPIF_NAMESPACE.svc.cluster.local:9090 + +# Show variables +echo "CAPIF_HOSTNAME = $CAPIF_HOSTNAME" +echo "CAPIF_REGISTER = $CAPIF_REGISTER" +echo "CAPIF_HTTP_PORT = $CAPIF_HTTP_PORT" +echo "CAPIF_HTTPS_PORT = $CAPIF_HTTPS_PORT" +echo "CAPIF_VAULT = $CAPIF_VAULT" +echo "CAPIF_VAULT_PORT = $CAPIF_VAULT_PORT" +echo "CAPIF_VAULT_TOKEN = $CAPIF_VAULT_TOKEN" +echo "TOTAL_USERS=$TOTAL_USERS" +echo "USERNAME_PREFIX=$USERNAME_PREFIX" +echo "USER_PASSWORD=$USER_PASSWORD" +echo "MOCK_SERVER_URL=$MOCK_SERVER_URL" +echo "NOTIFICATION_DESTINATION_URL=$NOTIFICATION_DESTINATION_URL" + +docker >/dev/null 2>/dev/null +if [[ $? -ne 0 ]] +then + echo "Docker maybe is not installed. Please check if docker CLI is present." + exit -1 +fi + +docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' +if [[ $? -ne 0 ]] +then + read -p "Robot image is not present. To continue, Do you want to build it? (y/n)" build_robot_image + if [[ $build_robot_image == "y" ]] + then + echo "Building Robot docker image." + cd $ROBOT_DOCKER_FILE_FOLDER + docker build --no-cache -t $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION . 
+ cd $CAPIF_BASE_DIR + else + exit -2 + fi +fi + +cd $CAPIF_BASE_DIR + +mkdir -p $RESULT_FOLDER + +docker run -ti --rm --network="host" \ + -v $TEST_FOLDER:/opt/robot-tests/tests \ + -v $RESULT_FOLDER:/opt/robot-tests/results ${DOCKER_ROBOT_IMAGE}:${DOCKER_ROBOT_IMAGE_VERSION} \ + --variable CAPIF_HOSTNAME:$CAPIF_HOSTNAME \ + --variable CAPIF_HTTP_PORT:$CAPIF_HTTP_PORT \ + --variable CAPIF_HTTPS_PORT:$CAPIF_HTTPS_PORT \ + --variable CAPIF_REGISTER:$CAPIF_REGISTER \ + --variable CAPIF_REGISTER_PORT:$CAPIF_REGISTER_PORT \ + --variable CAPIF_VAULT:$CAPIF_VAULT \ + --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \ + --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \ + --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \ + --variable MOCK_SERVER_URL:$MOCK_SERVER_URL \ + --variable USERNAME_PREFIX:$USERNAME_PREFIX \ + --include remove-users diff --git a/helm/scripts/run_remote_capif_tests.sh b/helm/scripts/run_remote_capif_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..0095300e4537a62ba01cfc267ce6a8ff0b0d97f4 --- /dev/null +++ b/helm/scripts/run_remote_capif_tests.sh @@ -0,0 +1,70 @@ +#!/bin/bash +source $(dirname "$(readlink -f "$0")")/variables.sh + +DOCKER_ROBOT_IMAGE=labs.etsi.org:5050/ocf/robot-tests-image +DOCKER_ROBOT_IMAGE_VERSION=1.0 + +TEST_FOLDER=$CAPIF_BASE_DIR/tests +RESULT_FOLDER=$CAPIF_BASE_DIR/results +ROBOT_DOCKER_FILE_FOLDER=$CAPIF_BASE_DIR/tools/robot + +# nginx Hostname and http port (80 by default) to reach for tests +CAPIF_REGISTER=$REGISTER_HOSTNAME +CAPIF_REGISTER_PORT=443 +CAPIF_HTTPS_PORT=443 + +# VAULT access configuration +CAPIF_VAULT=$VAULT_HOSTNAME +CAPIF_VAULT_PORT=80 +CAPIF_VAULT_TOKEN=$VAULT_ACCESS_TOKEN + + +MOCK_SERVER_URL=http://mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN:80 +NOTIFICATION_DESTINATION_URL=http://mock-server.$CAPIF_NAMESPACE.svc.cluster.local:9090 + +echo "CAPIF_HOSTNAME = $CAPIF_HOSTNAME" +echo "CAPIF_REGISTER = $REGISTER_HOSTNAME" +echo "CAPIF_HTTPS_PORT = $CAPIF_HTTPS_PORT" +echo "CAPIF_VAULT = $VAULT_INTERNAL_HOSTNAME" +echo "CAPIF_VAULT_PORT = $VAULT_PORT" +echo "CAPIF_VAULT_TOKEN = $VAULT_ACCESS_TOKEN" +echo "MOCK_SERVER_URL = $MOCK_SERVER_URL" + +cd $CAPIF_BASE_DIR + +docker >/dev/null 2>/dev/null +if [[ $? -ne 0 ]] +then + echo "Docker maybe is not installed. Please check if docker CLI is present." + exit -1 +fi + +docker images|grep -Eq '^'$DOCKER_ROBOT_IMAGE'[ ]+[ ]'$DOCKER_ROBOT_IMAGE_VERSION'' +if [[ $? -ne 0 ]] +then + read -p "Robot image is not present. To continue, Do you want to build it? (y/n)" build_robot_image + if [[ $build_robot_image == "y" ]] + then + echo "Building Robot docker image." + cd $ROBOT_DOCKER_FILE_FOLDER + docker build --no-cache -t $DOCKER_ROBOT_IMAGE:$DOCKER_ROBOT_IMAGE_VERSION . 
+    cd $CAPIF_BASE_DIR
+  else
+    exit -2
+  fi
+fi
+
+mkdir -p $RESULT_FOLDER
+
+docker run -ti --rm --network="host" \
+  -v $TEST_FOLDER:/opt/robot-tests/tests \
+  -v $RESULT_FOLDER:/opt/robot-tests/results ${DOCKER_ROBOT_IMAGE}:${DOCKER_ROBOT_IMAGE_VERSION} \
+  --variable CAPIF_HOSTNAME:$CAPIF_HOSTNAME \
+  --variable CAPIF_HTTPS_PORT:$CAPIF_HTTPS_PORT \
+  --variable CAPIF_REGISTER:$CAPIF_REGISTER \
+  --variable CAPIF_REGISTER_PORT:$CAPIF_REGISTER_PORT \
+  --variable CAPIF_VAULT:$CAPIF_VAULT \
+  --variable CAPIF_VAULT_PORT:$CAPIF_VAULT_PORT \
+  --variable CAPIF_VAULT_TOKEN:$CAPIF_VAULT_TOKEN \
+  --variable NOTIFICATION_DESTINATION_URL:$NOTIFICATION_DESTINATION_URL \
+  --variable MOCK_SERVER_URL:$MOCK_SERVER_URL $@
diff --git a/helm/scripts/uninstall_capif.sh b/helm/scripts/uninstall_capif.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1c9ed62ab9b1548a3a4b6e1335141f2ad16f9f4f
--- /dev/null
+++ b/helm/scripts/uninstall_capif.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+source $(dirname "$(readlink -f "$0")")/variables.sh
+
+# Function to display a warning message
+warning_message() {
+    echo "WARNING: This uninstallation process is irreversible."
+    echo "All data associated with the CAPIF service will be permanently lost."
+    echo "Are you sure you want to continue? (yes/no)"
+}
+
+# Display the warning message
+warning_message
+
+# Read the user input
+read -r USER_INPUT
+
+# Check if the user confirmed the uninstallation
+if [ "$USER_INPUT" != "yes" ]; then
+    echo "Uninstallation aborted by the user."
+    exit 1
+fi
+
+# Proceed with the uninstallation process
+echo "Proceeding with uninstallation..."
+
+helm $KUBECONFIG uninstall $CAPIF_NAME_VERSION_CHART -n $CAPIF_NAMESPACE || echo "$CAPIF_NAME_VERSION_CHART is not present"
+kubectl $KUBECONFIG delete namespace $CAPIF_NAMESPACE || echo "$CAPIF_NAMESPACE is not present"
+
+echo "Uninstallation complete. The CAPIF service and all associated data have been removed."
\ No newline at end of file
diff --git a/helm/scripts/uninstall_monitoring.sh b/helm/scripts/uninstall_monitoring.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f59954a198b123ff5edec4184c14ea86e5654305
--- /dev/null
+++ b/helm/scripts/uninstall_monitoring.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+source $(dirname "$(readlink -f "$0")")/variables.sh
+
+# Function to display a warning message
+warning_message() {
+    echo "WARNING: This uninstallation process is irreversible."
+    echo "All data associated with the Monitoring service will be permanently lost."
+    echo "Are you sure you want to continue? (yes/no)"
+}
+
+# Display the warning message
+warning_message
+
+# Read the user input
+read -r USER_INPUT
+
+# Check if the user confirmed the uninstallation
+if [ "$USER_INPUT" != "yes" ]; then
+    echo "Uninstallation aborted by the user."
+    exit 1
+fi
+
+# Proceed with the uninstallation process
+echo "Proceeding with uninstallation..."
+
+helm $KUBECONFIG uninstall $MONITORING_SERVICE_NAME -n $MONITORING_NAMESPACE || echo "$MONITORING_SERVICE_NAME is not present"
+kubectl $KUBECONFIG delete namespace $MONITORING_NAMESPACE || echo "$MONITORING_NAMESPACE is not present"
+
+echo "Uninstallation complete. The Monitoring service and all associated data have been removed."
\ No newline at end of file
diff --git a/helm/scripts/uninstall_vault.sh b/helm/scripts/uninstall_vault.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4f7d562804b1f0bdc52a82ea2993bcc46c81cb84
--- /dev/null
+++ b/helm/scripts/uninstall_vault.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+source $(dirname "$(readlink -f "$0")")/variables.sh
+
+# Function to display a warning message
+warning_message() {
+    echo "WARNING: This uninstallation process is irreversible."
+    echo "All data associated with the Vault service will be permanently lost."
+    echo "Are you sure you want to continue? (yes/no)"
+}
+
+# Display the warning message
+warning_message
+
+# Read the user input
+read -r USER_INPUT
+
+# Check if the user confirmed the uninstallation
+if [ "$USER_INPUT" != "yes" ]; then
+    echo "Uninstallation aborted by the user."
+    exit 1
+fi
+
+# Proceed with the uninstallation process
+echo "Proceeding with uninstallation..."
+
+helm $KUBECONFIG uninstall $VAULT_SERVICE_NAME -n $VAULT_NAMESPACE
+kubectl $KUBECONFIG delete job $VAULT_JOB_NAME -n $VAULT_NAMESPACE || echo "No vault $VAULT_JOB_NAME job present"
+kubectl $KUBECONFIG delete namespace $VAULT_NAMESPACE
+
+echo "Uninstallation complete. The Vault service and all associated data have been removed."
\ No newline at end of file
diff --git a/helm/scripts/variables.sh b/helm/scripts/variables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2a9b5a8293b4010007bb1349c057e4ddd742a151
--- /dev/null
+++ b/helm/scripts/variables.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+# Use custom kubeconfig. If you set the path to a kubeconfig file here, it will be used by the installation/uninstallation scripts
+export KUBECONFIG=""
+if [ -z "$KUBECONFIG" ]; then
+  echo "The variable KUBECONFIG is empty. Using default k8s environment..."
+else
+  KUBECONFIG="--kubeconfig $KUBECONFIG"
+  echo "The variable KUBECONFIG is not empty. Its value is: $KUBECONFIG"
+fi
+
+# timestamp to use across the scripts
+export timestamp=$(date +"%Y%m%d_%H%M%S")
+
+# k8s public ip. NONE disables local register service DNS resolution to reach the CCF, an empty value will try to get the ip of the ingress-nginx-controller NodePort,
+# and any other value sets the resolution of CAPIF_HOSTNAME to that K8S_IP.
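+# Example (hypothetical values): K8S_IP="NONE" to skip local resolution,
+# K8S_IP="10.0.0.10" to pin resolution to a specific node IP, or K8S_IP=""
+# to auto-detect the ingress-nginx-controller NodePort IP (see the check at
+# the end of this file).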
+export K8S_IP=""
+
+# Directories variables setup (no modification needed)
+export SCRIPTS_DIR=$(dirname "$(readlink -f "$0")")
+export HELM_DIR=$(dirname "$SCRIPTS_DIR")
+export CAPIF_BASE_DIR=$(dirname "$HELM_DIR")
+
+# Print scripts directory
+echo "The /helm/scripts directory is: $SCRIPTS_DIR"
+echo "The /helm directory is: $HELM_DIR"
+echo "The base directory is: $CAPIF_BASE_DIR"
+
+# Configuration needed before using the installation/uninstallation scripts
+
+# Vault installation variables
+## Vault configuration
+export VAULT_HOSTNAME=vault.testbed.develop
+export VAULT_NAMESPACE=ocf-vault
+export VAULT_SERVICE_NAME='vault'
+export LABEL_TO_CHECK="app.kubernetes.io/name"
+
+## File to store key and token
+export VAULT_FILE="$HELM_DIR/vault_keys.txt"
+
+## Vault domains to be included
+export DOMAIN1=*.testbed.pre-production
+export DOMAIN2=*.testbed.validation
+export DOMAIN3=*.testbed.develop
+
+## Vault configuration job
+VAULT_JOB_NAME=vault-pki
+
+# Monitoring installation variables
+## Prometheus Hostname to be used at ingress configuration
+export PROMETHEUS_HOSTNAME=prometheus.testbed.develop
+export MONITORING_NAMESPACE=monitoring
+export MONITORING_SERVICE_NAME=monitoring
+
+# OpenCAPIF deployment variables
+## Register and CAPIF hostnames to be deployed
+export CAPIF_HOSTNAME="capif.testbed.develop"
+export REGISTER_HOSTNAME="register.testbed.develop"
+## namespace to use
+export CAPIF_NAMESPACE=ocf-capif
+## version to be used on deployment
+export CAPIF_NAME_VERSION_CHART=ocf-release1
+## Configuration of endpoints in ingress for grafana, mock-server and both mongo express instances.
+### this value is used to build the ingress hostnames ocf-mon-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN, mock-server-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN, mongo-express-register-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN and mongo-express-$CAPIF_CI_ENV_ENDPOINT.$CAPIF_DOMAIN
+export CAPIF_CI_ENV_ENDPOINT=capif
+### Domain to be used in grafana, mock-server and both mongo express instances.
+export CAPIF_DOMAIN=testbed.develop
+## Configuration of images to be used on deployment
+### Docker Registry to download images (must be accessible by the k8s cluster)
+export CAPIF_DOCKER_REGISTRY="labs.etsi.org:5050/ocf/capif/prod"
+### Tag to be used
+export CAPIF_IMAGE_TAG="v1.0.0-release"
+## Prometheus URL, usually the internal k8s hostname (if CAPIF is deployed on the same k8s cluster) with port 9090
+export PROMETHEUS_URL="http://$MONITORING_SERVICE_NAME-prometheus.$MONITORING_NAMESPACE.svc.cluster.local:9090"
+## Vault CAPIF configuration
+export VAULT_INTERNAL_HOSTNAME="$VAULT_SERVICE_NAME.$VAULT_NAMESPACE.svc.cluster.local"
+export VAULT_PORT="8200"
+export VAULT_ACCESS_TOKEN="dev-only-token"
+
+### To deploy in another environment, set the URLs accordingly and use the matching kubeconfig:
+if [ -f "$VAULT_FILE" ] && [ -s "$VAULT_FILE" ]; then
+    VAULT_ACCESS_TOKEN=$(awk '/Initial Root Token/{ print $4 }' $VAULT_FILE)
+    echo "$VAULT_FILE exists and has content."
+else
+    echo "$VAULT_FILE does not exist or is empty."
+fi
+echo "Using value on VAULT_ACCESS_TOKEN=$VAULT_ACCESS_TOKEN"
+
+### If K8S_IP is empty, the script will try to get the ingress-nginx-controller NodePort IP to grant DNS resolution for the register to connect locally to the CAPIF nginx
+if [ "$K8S_IP" == "NONE" ]; then
+    echo "K8S_IP value is NONE.
Register service will not have local DNS resolution" +elif [ -z "$K8S_IP" ]; then + K8S_IP=$(kubectl $KUBECONFIG get svc -A | grep ingress-nginx-controller | awk '/NodePort/{ print $4 }') + echo "K8S_IP value will be $K8S_IP" +fi diff --git a/services/docker-compose/docker-compose-capif.yml b/services/docker-compose/docker-compose-capif.yml deleted file mode 100644 index 0095d9a1794b1f86a319c0819359f340b731fabe..0000000000000000000000000000000000000000 --- a/services/docker-compose/docker-compose-capif.yml +++ /dev/null @@ -1,246 +0,0 @@ -version: '3.7' - -services: - redis: - image: "redis:alpine" - command: redis-server - ports: - - "6379:6379" - volumes: - - $PWD/redis-data:/var/lib/redis - - $PWD/redis.conf:/usr/local/etc/redis/redis.conf - environment: - - REDIS_REPLICATION_MODE=master - - access-control-policy: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Access_Control_Policy_API:/usr/src/app - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - environment: - - CONTAINER_NAME=access-control-policy - - MONITORING=${MONITORING} - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/access-control-policy:v3.1.4 - depends_on: - - redis - - nginx - - - api-invoker-management: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_API_Invoker_Management_API:/usr/src/app - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - - vault:host-gateway - environment: - - CONTAINER_NAME=api-invoker-management - - MONITORING=${MONITORING} - - VAULT_HOSTNAME=vault - - VAULT_ACCESS_TOKEN=dev-only-token - - VAULT_PORT=8200 - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/api-invoker-management-api:v3.1.4 - depends_on: - - redis - - nginx - - api-provider-management: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_API_Provider_Management_API:/usr/src/app - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - - vault:host-gateway - image: public.ecr.aws/o2v4a8t6/opencapif/api-provider-management-api:v3.1.4 - environment: - - CONTAINER_NAME=api-provider-management - - MONITORING=${MONITORING} - - VAULT_HOSTNAME=vault - - VAULT_ACCESS_TOKEN=dev-only-token - - VAULT_PORT=8200 - depends_on: - - redis - - nginx - - logs: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Auditing_API:/usr/src/app - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/auditing-api:v3.1.4 - environment: - - CONTAINER_NAME=api-auditing - - MONITORING=${MONITORING} - depends_on: - - mongo - - service-apis: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Discover_Service_API:/usr/src/app - restart: unless-stopped - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - image: public.ecr.aws/o2v4a8t6/opencapif/discover-service-api:v3.1.4 - environment: - - CONTAINER_NAME=services-apis - - MONITORING=${MONITORING} - depends_on: - - mongo - - capif-events: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Events_API:/usr/src/app - image: public.ecr.aws/o2v4a8t6/opencapif/events-api:v3.1.4 - environment: - - CONTAINER_NAME=api-events - - MONITORING=${MONITORING} - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - depends_on: - - redis - - mongo - - 
api-invocation-logs: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Logging_API_Invocation_API:/usr/src/app - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/api-invocation-logs-api:v3.1.4 - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - environment: - - CAPIF_HOSTNAME=${CAPIF_HOSTNAME} - - CONTAINER_NAME=api-invocation-logs - - MONITORING=${MONITORING} - depends_on: - - mongo - - published-apis: - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Publish_Service_API:/usr/src/app - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/publish-service-api:v3.1.4 - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - environment: - - CONTAINER_NAME=api-publish-apis - - MONITORING=${MONITORING} - depends_on: - - redis - - mongo - - capif-routing-info: - expose: - - "8080" - image: public.ecr.aws/o2v4a8t6/opencapif/routing-info-api:v3.1.4 - - capif-security: - build: ../TS29222_CAPIF_Security_API/. - expose: - - "8080" - volumes: - - ../TS29222_CAPIF_Security_API:/usr/src/app - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/security-api:v3.1.4 - environment: - - CAPIF_HOSTNAME=${CAPIF_HOSTNAME} - - CONTAINER_NAME=api-security - - MONITORING=${MONITORING} - - VAULT_HOSTNAME=vault - - VAULT_ACCESS_TOKEN=dev-only-token - - VAULT_PORT=8200 - extra_hosts: - - host.docker.internal:host-gateway - - fluent-bit:host-gateway - - otel-collector:host-gateway - - vault:host-gateway - depends_on: - - redis - - nginx - - mongo: - image: mongo:6.0.2 - logging: - driver: 'none' - restart: unless-stopped - ports: - - 27017:27017 - environment: - MONGO_INITDB_ROOT_USERNAME: root - MONGO_INITDB_ROOT_PASSWORD: example - - mongo-express: - image: mongo-express:1.0.0-alpha.4 - logging: - driver: 'none' - restart: unless-stopped - ports: - - 8082:8081 - environment: - ME_CONFIG_MONGODB_ADMINUSERNAME: root - ME_CONFIG_MONGODB_ADMINPASSWORD: example - ME_CONFIG_MONGODB_URL: mongodb://root:example@mongo:27017/ - depends_on: - - mongo - - nginx: - ports: - - "8080:8080" - - "443:443" - image: public.ecr.aws/o2v4a8t6/opencapif/nginx:v3.1.4 - environment: - - CAPIF_HOSTNAME=${CAPIF_HOSTNAME} - - VAULT_HOSTNAME=vault - - VAULT_ACCESS_TOKEN=dev-only-token - - VAULT_PORT=8200 - hostname: ${CAPIF_HOSTNAME} - volumes: - - ./nginx/certs:/etc/nginx/certs - extra_hosts: - - host.docker.internal:host-gateway - - vault:host-gateway - restart: unless-stopped - depends_on: - - redis - - service-apis - - api-invocation-logs - - published-apis - - capif-events - - logs - -networks: - default: - name: capif-network - external: true diff --git a/services/docker-compose/docker-compose-montoring.yml b/services/docker-compose/docker-compose-montoring.yml deleted file mode 100644 index 01dc06bcd929e1c696132d40c13b7488d8edbb7e..0000000000000000000000000000000000000000 --- a/services/docker-compose/docker-compose-montoring.yml +++ /dev/null @@ -1,110 +0,0 @@ -version: '3' -services: - prometheus: - image: prom/prometheus:latest - container_name: prometheus - user: "${DUID}:${DGID}" - volumes: - - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml - - ./prometheus/prometheus_db:/var/lib/prometheus - - ./prometheus/prometheus_db:/prometheus - - ./prometheus/prometheus_db:/etc/prometheus - - ./prometheus/alert.rules:/etc/prometheus/alert.rules - command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--web.route-prefix=/' - - 
'--storage.tsdb.retention.time=200h' - - '--web.enable-lifecycle' - restart: unless-stopped - ports: - - '9090:9090' - - # cadvisor collects metrics about running containers - cadvisor: - image: gcr.io/cadvisor/cadvisor:v0.47.2 - container_name: cadvisor - ports: - - 8090:8080 - volumes: - - /:/rootfs:ro - - /var/run:/var/run:rw - - /sys:/sys:ro - - /var/lib/docker/:/var/lib/docker:ro - - /var/run/docker.sock:/var/run/docker.sock:rw - - grafana: - image: grafana/grafana - user: "${DUID}:${DGID}" - environment: - - GF_SECURITY_ADMIN_PASSWORD=secure_pass - - GF_PATHS_PROVISIONING=/etc/grafana/provisioning - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - volumes: - - ./grafana/grafana_config/grafana.ini:/etc/grafana/grafana.ini - - ./grafana/grafana_db:/var/lib/grafana - - ./grafana/grafana_provisioning/grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml - - ./grafana/grafana_provisioning/grafana-default-provisioning.yaml:/etc/grafana/provisioning/dashboards/default.yaml - - ./grafana/grafana_dashboards/Docker-monitoring.json:/var/lib/grafana/dashboards/Docker-monitoring.json - - ./grafana/grafana_dashboards/Loki-Logs.json:/var/lib/grafana/dashboards/Loki-Logs.json - - depends_on: - - prometheus - ports: - - '3000:3000' - - # loki save and analyze logs - loki: - image: grafana/loki:2.8.0 - ports: - - "3100:3100" - command: -config.file=/etc/loki/local-config.yaml - - # promtail send docker logs to loki - promtail: - image: grafana/promtail:2.8.0 - volumes: - - /var/log:/var/log - command: -config.file=/etc/promtail/config.yml - - # grafana image renderer - renderer: - image: grafana/grafana-image-renderer:latest - container_name: grafana-image-renderer - expose: - - "8081" - environment: - ENABLE_METRICS: "true" - - # fluent-bit send logs to loki - fluent-bit: - image: grafana/fluent-bit-plugin-loki:main - container_name: fluent-bit - environment: - - LOKI_URL=http://loki:3100/loki/api/v1/push - volumes: - - ./fluent_bit/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf - ports: - - "24224:24224" - - "24224:24224/udp" - - # opentelemetry collector - otel-collector: - image: otel/opentelemetry-collector:latest - ports: - - 55680:55680 - - 4317:4317 - volumes: - - ./otlp_collector/otel-config.yaml:/etc/otel-collector-config.yaml - command: ["--config", "/etc/otel-collector-config.yaml"] - - # tempo is a distributed tracing backend - tempo: - image: grafana/tempo:latest - command: [ "-config.file=/etc/tempo.yaml" ] - volumes: - - ./tempo/tempo.yaml:/etc/tempo.yaml - - ./tempo/tempo-data:/tmp/tempo - ports: - - 3102:3100 - diff --git a/services/docker-compose/docker-compose-register.yml b/services/docker-compose/docker-compose-register.yml deleted file mode 100644 index acd06cad84bc638b050ee8a3be0ba49b7324cd46..0000000000000000000000000000000000000000 --- a/services/docker-compose/docker-compose-register.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: '3.7' - -services: - register: - ports: - - 8084:8080 - volumes: - - ../register:/usr/src/app - environment: - - CAPIF_PRIV_KEY=${CAPIF_PRIV_KEY} - - VAULT_HOSTNAME=vault - - VAULT_ACCESS_TOKEN=dev-only-token - - VAULT_PORT=8200 - extra_hosts: - - host.docker.internal:host-gateway - - vault:host-gateway - restart: unless-stopped - image: public.ecr.aws/o2v4a8t6/opencapif/register:v3.1.4 - depends_on: - - mongo_register - - mongo_register: - image: mongo:6.0.2 - restart: unless-stopped - ports: - - 28017:27017 - environment: - MONGO_INITDB_ROOT_USERNAME: root - MONGO_INITDB_ROOT_PASSWORD: example - 
-networks: - default: - name: capif-network - external: true - - - diff --git a/services/docker-compose/docker-compose-vault.yml b/services/docker-compose/docker-compose-vault.yml deleted file mode 100644 index cd11e0aebd25a52089335fb3ef0430faf1b61ef4..0000000000000000000000000000000000000000 --- a/services/docker-compose/docker-compose-vault.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: '3.7' -services: - vault: - build: - context: ../vault - restart: unless-stopped - ports: - - 8200:8200 - cap_add: - - IPC_LOCK - environment: - - VAULT_DEV_ROOT_TOKEN_ID=dev-only-token - - VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200 - volumes: - - ./vault/data:/vault/data - - ./vault/config:/vault/config diff --git a/services/docker-compose/run.sh b/services/docker-compose/run.sh deleted file mode 100755 index 0bde6dbbec26c95ae11ec05079fdc9e821ebc812..0000000000000000000000000000000000000000 --- a/services/docker-compose/run.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash - -HOSTNAME=capifcore -MONITORING_STATE=false -DEPLOY=all - -#Needed to avoid write permissions on bind volumes with prometheus and grafana -DUID=$(id -u) -DGID=$(id -g) - -# Get docker compose version -docker_version=$(docker compose version --short | cut -d',' -f1) -IFS='.' read -ra version_components <<< "$docker_version" - -if [ "${version_components[0]}" -ge 2 ] && [ "${version_components[1]}" -ge 10 ]; then - echo "Docker compose version it greater than 2.10" -else - echo "Docker compose version is not valid. Should be greater than 2.10" - exit 1 -fi - -# Read params -while getopts ":h:m:" opt; do - case $opt in - h) - HOSTNAME="$OPTARG" - ;; - m) - MONITORING_STATE="$OPTARG" - ;; - \?) - echo "Opción no válida: -$OPTARG" >&2 - exit 1 - ;; - :) - echo "La opción -$OPTARG requiere un argumento." >&2 - exit 1 - ;; - esac -done - -echo Nginx hostname will be $HOSTNAME, deploy $DEPLOY, monitoring $MONITORING_STATE - -if [ "$MONITORING_STATE" == "true" ] ; then - echo '***Monitoring set as true***' - echo '***Creating Monitoging stack***' - - DUID=$DUID DGID=$DGID docker compose -f "./docker-compose-monitoring.yml" up --detach - status=$? - if [ $status -eq 0 ]; then - echo "*** Monitoring Stack Runing ***" - else - echo "*** Monitoring Stack failed to start ***" - exit $status - fi -fi - -docker network create capif-network - -docker compose -f "./docker-compose-vault.yml" up --detach --build - -status=$? -if [ $status -eq 0 ]; then - echo "*** Vault Service Runing ***" -else - echo "*** Vault failed to start ***" - exit $status -fi - -CAPIF_HOSTNAME=$HOSTNAME MONITORING=$MONITORING_STATE docker compose -f "./docker-compose-capif.yml" up --detach --build - -status=$? -if [ $status -eq 0 ]; then - echo "*** All Capif services are running ***" -else - echo "*** Some Capif services failed to start ***" - exit $status -fi - - -CAPIF_PRIV_KEY_BASE_64=$(echo "$(cat nginx/certs/server.key)") -CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 docker compose -f "./docker-compose-register.yml" up --detach --build - -status=$? 
-if [ $status -eq 0 ]; then - echo "*** Register Service are running ***" -else - echo "*** Register Service failed to start ***" -fi - -exit $status diff --git a/services/docker-compose/stop.sh b/services/docker-compose/stop.sh deleted file mode 100755 index 4a6435482890009601e749f897e965fd956f291c..0000000000000000000000000000000000000000 --- a/services/docker-compose/stop.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/bash - -HOSTNAME=capifcore -MONITORING_STATE=false -DEPLOY=all -DEPLOY_BACKOFFICE=${DEPLOY_BACKOFFICE:-false} - -# Get docker compose version -docker_version=$(docker compose version --short | cut -d',' -f1) -IFS='.' read -ra version_components <<< "$docker_version" - -if [ "${version_components[0]}" -ge 2 ] && [ "${version_components[1]}" -ge 10 ]; then - echo "Docker compose version it greater than 2.10" -else - echo "Docker compose version is not valid. Should be greater than 2.10" - exit 1 -fi - -# Read params -while getopts ":h:m:" opt; do - case $opt in - h) - HOSTNAME="$OPTARG" - ;; - m) - MONITORING_STATE="$OPTARG" - ;; - \?) - echo "Opción no válida: -$OPTARG" >&2 - exit 1 - ;; - :) - echo "La opción -$OPTARG requiere un argumento." >&2 - exit 1 - ;; - esac -done - -if [ "$MONITORING_STATE" = true ] ; then - docker compose -f "./docker-compose-monitoring.yml" down - status=$? - if [ $status -eq 0 ]; then - echo "*** Monitoring Stack Stopped ***" - else - echo "*** Monitoring Stack failed to stopt ***" - exit $status - fi -fi - -docker compose -f "./docker-compose-vault.yml" down - -status=$? -if [ $status -eq 0 ]; then - echo "*** Vault Service Stopped ***" -else - echo "*** Vault failed to stop ***" - exit $status -fi - - - -CAPIF_HOSTNAME=$HOSTNAME MONITORING=$MONITORING_STATE docker compose -f "./docker-compose-capif.yml" down - -status=$? -if [ $status -eq 0 ]; then - echo "*** All Capif services are stopped ***" -else - echo "*** Some Capif services failed to stop ***" - exit $status -fi - - -CAPIF_PRIV_KEY_BASE_64=$(echo "$(cat nginx/certs/server.key)") -CAPIF_PRIV_KEY=$CAPIF_PRIV_KEY_BASE_64 docker compose -f "./docker-compose-register.yml" down - -status=$? -if [ $status -eq 0 ]; then - echo "*** Register Service are stopped ***" -else - echo "*** Register Service failed to stop ***" -fi - - -if [ $DEPLOY_BACKOFFICE = "false" ]; then - exit $status -fi - - -docker compose -f "./docker-compose-backoffice.yml" down - -status=$? 
-if [ $status -eq 0 ]; then - echo "*** Backoffice Service are stopped ***" -else - echo "*** Backoffice Service failed to stop ***" -fi - -exit $status diff --git a/monitoring/docker-compose.yml b/services/monitoring/docker-compose.yml similarity index 100% rename from monitoring/docker-compose.yml rename to services/monitoring/docker-compose.yml diff --git a/monitoring/fluent_bit/fluent-bit.conf b/services/monitoring/fluent_bit/fluent-bit.conf similarity index 100% rename from monitoring/fluent_bit/fluent-bit.conf rename to services/monitoring/fluent_bit/fluent-bit.conf diff --git a/monitoring/grafana/grafana_config/grafana.ini b/services/monitoring/grafana/grafana_config/grafana.ini similarity index 100% rename from monitoring/grafana/grafana_config/grafana.ini rename to services/monitoring/grafana/grafana_config/grafana.ini diff --git a/monitoring/grafana/grafana_dashboards/Docker-monitoring.json b/services/monitoring/grafana/grafana_dashboards/Docker-monitoring.json similarity index 100% rename from monitoring/grafana/grafana_dashboards/Docker-monitoring.json rename to services/monitoring/grafana/grafana_dashboards/Docker-monitoring.json diff --git a/monitoring/grafana/grafana_dashboards/Loki-Logs.json b/services/monitoring/grafana/grafana_dashboards/Loki-Logs.json similarity index 100% rename from monitoring/grafana/grafana_dashboards/Loki-Logs.json rename to services/monitoring/grafana/grafana_dashboards/Loki-Logs.json diff --git a/monitoring/grafana/grafana_db/.keep b/services/monitoring/grafana/grafana_db/.keep similarity index 100% rename from monitoring/grafana/grafana_db/.keep rename to services/monitoring/grafana/grafana_db/.keep diff --git a/services/monitoring/grafana/grafana_db/alerting/1/__default__.tmpl b/services/monitoring/grafana/grafana_db/alerting/1/__default__.tmpl new file mode 100644 index 0000000000000000000000000000000000000000..b8633d1689b2d3ceb6607e914b0a2c4d77192175 --- /dev/null +++ b/services/monitoring/grafana/grafana_db/alerting/1/__default__.tmpl @@ -0,0 +1,53 @@ + +{{ define "__subject" }}[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ if gt (.Alerts.Resolved | len) 0 }}, RESOLVED:{{ .Alerts.Resolved | len }}{{ end }}{{ end }}] {{ .GroupLabels.SortedPairs.Values | join " " }} {{ if gt (len .CommonLabels) (len .GroupLabels) }}({{ with .CommonLabels.Remove .GroupLabels.Names }}{{ .Values | join " " }}{{ end }}){{ end }}{{ end }} + +{{ define "__text_values_list" }}{{ if len .Values }}{{ $first := true }}{{ range $refID, $value := .Values -}} +{{ if $first }}{{ $first = false }}{{ else }}, {{ end }}{{ $refID }}={{ $value }}{{ end -}} +{{ else }}[no value]{{ end }}{{ end }} + +{{ define "__text_alert_list" }}{{ range . }} +Value: {{ template "__text_values_list" . }} +Labels: +{{ range .Labels.SortedPairs }} - {{ .Name }} = {{ .Value }} +{{ end }}Annotations: +{{ range .Annotations.SortedPairs }} - {{ .Name }} = {{ .Value }} +{{ end }}{{ if gt (len .GeneratorURL) 0 }}Source: {{ .GeneratorURL }} +{{ end }}{{ if gt (len .SilenceURL) 0 }}Silence: {{ .SilenceURL }} +{{ end }}{{ if gt (len .DashboardURL) 0 }}Dashboard: {{ .DashboardURL }} +{{ end }}{{ if gt (len .PanelURL) 0 }}Panel: {{ .PanelURL }} +{{ end }}{{ end }}{{ end }} + +{{ define "default.title" }}{{ template "__subject" . 
}}{{ end }} + +{{ define "default.message" }}{{ if gt (len .Alerts.Firing) 0 }}**Firing** +{{ template "__text_alert_list" .Alerts.Firing }}{{ if gt (len .Alerts.Resolved) 0 }} + +{{ end }}{{ end }}{{ if gt (len .Alerts.Resolved) 0 }}**Resolved** +{{ template "__text_alert_list" .Alerts.Resolved }}{{ end }}{{ end }} + + +{{ define "__teams_text_alert_list" }}{{ range . }} +Value: {{ template "__text_values_list" . }} +Labels: +{{ range .Labels.SortedPairs }} - {{ .Name }} = {{ .Value }} +{{ end }} +Annotations: +{{ range .Annotations.SortedPairs }} - {{ .Name }} = {{ .Value }} +{{ end }} +{{ if gt (len .GeneratorURL) 0 }}Source: [{{ .GeneratorURL }}]({{ .GeneratorURL }}) + +{{ end }}{{ if gt (len .SilenceURL) 0 }}Silence: [{{ .SilenceURL }}]({{ .SilenceURL }}) + +{{ end }}{{ if gt (len .DashboardURL) 0 }}Dashboard: [{{ .DashboardURL }}]({{ .DashboardURL }}) + +{{ end }}{{ if gt (len .PanelURL) 0 }}Panel: [{{ .PanelURL }}]({{ .PanelURL }}) + +{{ end }} +{{ end }}{{ end }} + + +{{ define "teams.default.message" }}{{ if gt (len .Alerts.Firing) 0 }}**Firing** +{{ template "__teams_text_alert_list" .Alerts.Firing }}{{ if gt (len .Alerts.Resolved) 0 }} + +{{ end }}{{ end }}{{ if gt (len .Alerts.Resolved) 0 }}**Resolved** +{{ template "__teams_text_alert_list" .Alerts.Resolved }}{{ end }}{{ end }} diff --git a/monitoring/grafana_config/provisioning/dashboards/default.yaml b/services/monitoring/grafana/grafana_db/dashboards/Docker-monitoring.json similarity index 100% rename from monitoring/grafana_config/provisioning/dashboards/default.yaml rename to services/monitoring/grafana/grafana_db/dashboards/Docker-monitoring.json diff --git a/monitoring/grafana_config/provisioning/datasources/datasources.yaml b/services/monitoring/grafana/grafana_db/dashboards/Loki-Logs.json similarity index 100% rename from monitoring/grafana_config/provisioning/datasources/datasources.yaml rename to services/monitoring/grafana/grafana_db/dashboards/Loki-Logs.json diff --git a/services/monitoring/grafana/grafana_db/grafana.db b/services/monitoring/grafana/grafana_db/grafana.db new file mode 100644 index 0000000000000000000000000000000000000000..78af2bae35edeccd5788efab79568fa54281fb41 Binary files /dev/null and b/services/monitoring/grafana/grafana_db/grafana.db differ diff --git a/monitoring/grafana/grafana_provisioning/grafana-datasources.yaml b/services/monitoring/grafana/grafana_provisioning/grafana-datasources.yaml similarity index 100% rename from monitoring/grafana/grafana_provisioning/grafana-datasources.yaml rename to services/monitoring/grafana/grafana_provisioning/grafana-datasources.yaml diff --git a/monitoring/grafana/grafana_provisioning/grafana-default-provisioning.yaml b/services/monitoring/grafana/grafana_provisioning/grafana-default-provisioning.yaml similarity index 100% rename from monitoring/grafana/grafana_provisioning/grafana-default-provisioning.yaml rename to services/monitoring/grafana/grafana_provisioning/grafana-default-provisioning.yaml diff --git a/monitoring/prometheus/alert.rules/.keep b/services/monitoring/grafana_config/provisioning/dashboards/default.yaml old mode 100644 new mode 100755 similarity index 100% rename from monitoring/prometheus/alert.rules/.keep rename to services/monitoring/grafana_config/provisioning/dashboards/default.yaml diff --git a/monitoring/prometheus/prometheus_db/.keep b/services/monitoring/grafana_config/provisioning/datasources/datasources.yaml old mode 100644 new mode 100755 similarity index 100% rename from monitoring/prometheus/prometheus_db/.keep rename 
to services/monitoring/grafana_config/provisioning/datasources/datasources.yaml diff --git a/monitoring/otlp_collector/otel-config.yaml b/services/monitoring/otlp_collector/otel-config.yaml similarity index 100% rename from monitoring/otlp_collector/otel-config.yaml rename to services/monitoring/otlp_collector/otel-config.yaml diff --git a/monitoring/tempo/tempo-data/.keep b/services/monitoring/prometheus/alert.rules/.keep similarity index 100% rename from monitoring/tempo/tempo-data/.keep rename to services/monitoring/prometheus/alert.rules/.keep diff --git a/monitoring/prometheus/prometheus.yml b/services/monitoring/prometheus/prometheus.yml similarity index 100% rename from monitoring/prometheus/prometheus.yml rename to services/monitoring/prometheus/prometheus.yml diff --git a/services/monitoring/prometheus/prometheus_db/.keep b/services/monitoring/prometheus/prometheus_db/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/chunks/000001 b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/chunks/000001 new file mode 100644 index 0000000000000000000000000000000000000000..1b94dcf33ae22898b49c551f5cbd93e0f92bd1fb Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/chunks/000001 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/index b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/index new file mode 100644 index 0000000000000000000000000000000000000000..6974dd2b07146016d5fede24d4c7b8c7bececd54 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/index differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/meta.json b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/meta.json new file mode 100644 index 0000000000000000000000000000000000000000..6155762fea7fa4fbfbed7d2846b9f15d1d8dd400 --- /dev/null +++ b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/meta.json @@ -0,0 +1,30 @@ +{ + "ulid": "01HVSTBRKRPHTKGXWB222EZ4ZY", + "minTime": 1713449906811, + "maxTime": 1713463200000, + "stats": { + "numSamples": 920061, + "numSeries": 8403, + "numChunks": 14070 + }, + "compaction": { + "level": 2, + "sources": [ + "01HVS3ZJGFCP273BZ2227QCBQP", + "01HVSCQJ99F8BAWEZYFAW5DPMG" + ], + "parents": [ + { + "ulid": "01HVS3ZJGFCP273BZ2227QCBQP", + "minTime": 1713449906811, + "maxTime": 1713456000000 + }, + { + "ulid": "01HVSCQJ99F8BAWEZYFAW5DPMG", + "minTime": 1713457156573, + "maxTime": 1713463200000 + } + ] + }, + "version": 1 +} \ No newline at end of file diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/tombstones b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/tombstones new file mode 100644 index 0000000000000000000000000000000000000000..95fb83272e6f55edeee7e5d86bdec5fcd217eae0 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVSTBRKRPHTKGXWB222EZ4ZY/tombstones differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/chunks/000001 b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/chunks/000001 new file mode 100644 index 
0000000000000000000000000000000000000000..614ed33d71a4964ff8fe700fa69da483fe3d6a23 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/chunks/000001 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/index b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/index new file mode 100644 index 0000000000000000000000000000000000000000..84d895731cebb63997dadb85f77e800fd3456dd1 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/index differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/meta.json b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/meta.json new file mode 100644 index 0000000000000000000000000000000000000000..9f6bc9508688e41649976c6bc2eb5ce1df5134f4 --- /dev/null +++ b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/meta.json @@ -0,0 +1,17 @@ +{ + "ulid": "01HVT7QVBXPCA5GM8AH9RQ5MJ3", + "minTime": 1713486181515, + "maxTime": 1713492000000, + "stats": { + "numSamples": 10197, + "numSeries": 5289, + "numChunks": 5289 + }, + "compaction": { + "level": 1, + "sources": [ + "01HVT7QVBXPCA5GM8AH9RQ5MJ3" + ] + }, + "version": 1 +} \ No newline at end of file diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/tombstones b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/tombstones new file mode 100644 index 0000000000000000000000000000000000000000..95fb83272e6f55edeee7e5d86bdec5fcd217eae0 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVT7QVBXPCA5GM8AH9RQ5MJ3/tombstones differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/chunks/000001 b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/chunks/000001 new file mode 100644 index 0000000000000000000000000000000000000000..4a8e7a3f05207aa8541d191d47b0ea4ca9056b89 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/chunks/000001 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/index b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/index new file mode 100644 index 0000000000000000000000000000000000000000..4d5a640cf495a058c837d34ff3321a8d31e9648d Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/index differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/meta.json b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/meta.json new file mode 100644 index 0000000000000000000000000000000000000000..42a6b7809751245b7743413cb9965a21b6779d72 --- /dev/null +++ b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/meta.json @@ -0,0 +1,17 @@ +{ + "ulid": "01HVTE261SHE47WJN66FKM6MEN", + "minTime": 1713494454205, + "maxTime": 1713499200000, + "stats": { + "numSamples": 20493, + "numSeries": 5339, + "numChunks": 5339 + }, + "compaction": { + "level": 1, + "sources": [ + "01HVTE261SHE47WJN66FKM6MEN" + ] + }, + "version": 1 +} \ No newline at end of file diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/tombstones b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/tombstones 
new file mode 100644 index 0000000000000000000000000000000000000000..95fb83272e6f55edeee7e5d86bdec5fcd217eae0 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVTE261SHE47WJN66FKM6MEN/tombstones differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/chunks/000001 b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/chunks/000001 new file mode 100644 index 0000000000000000000000000000000000000000..dda53c1d0000dc7fc1bc7e0487326cd61a48d68a Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/chunks/000001 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/index b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/index new file mode 100644 index 0000000000000000000000000000000000000000..25612cd3bd86b2117442217faf43d1ade95d1e04 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/index differ diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/meta.json b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/meta.json new file mode 100644 index 0000000000000000000000000000000000000000..fb9fd66257610b35de913dd511dfe15288ab7187 --- /dev/null +++ b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/meta.json @@ -0,0 +1,36 @@ +{ + "ulid": "01HVTE269MJJWKFVJEQEY9F74T", + "minTime": 1713466825347, + "maxTime": 1713484800000, + "stats": { + "numSamples": 71981, + "numSeries": 5472, + "numChunks": 16142 + }, + "compaction": { + "level": 2, + "sources": [ + "01HVSMKAKMCPJSN3P1AN2N6775", + "01HVSTBR4Q8XKAJN8X9M0V1ZF0", + "01HVSZ68RW5DAN1CZTZPJ188X5" + ], + "parents": [ + { + "ulid": "01HVSMKAKMCPJSN3P1AN2N6775", + "minTime": 1713466825347, + "maxTime": 1713470400000 + }, + { + "ulid": "01HVSTBR4Q8XKAJN8X9M0V1ZF0", + "minTime": 1713470425347, + "maxTime": 1713477600000 + }, + { + "ulid": "01HVSZ68RW5DAN1CZTZPJ188X5", + "minTime": 1713478673132, + "maxTime": 1713484800000 + } + ] + }, + "version": 1 +} \ No newline at end of file diff --git a/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/tombstones b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/tombstones new file mode 100644 index 0000000000000000000000000000000000000000..95fb83272e6f55edeee7e5d86bdec5fcd217eae0 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/01HVTE269MJJWKFVJEQEY9F74T/tombstones differ diff --git a/services/monitoring/prometheus/prometheus_db/data/chunks_head/000004 b/services/monitoring/prometheus/prometheus_db/data/chunks_head/000004 new file mode 100644 index 0000000000000000000000000000000000000000..f13c22baa8e3ec185345329a629645cc6ca07b7e Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/chunks_head/000004 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/queries.active b/services/monitoring/prometheus/prometheus_db/data/queries.active new file mode 100644 index 0000000000000000000000000000000000000000..8bfef0eabd49630443efda04a88fc8fb2b6b67bd Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/queries.active differ diff --git a/services/monitoring/prometheus/prometheus_db/data/wal/00000249 b/services/monitoring/prometheus/prometheus_db/data/wal/00000249 new file mode 100644 index 
0000000000000000000000000000000000000000..37797ce1badae2f08d16b0134abb5b8c17cb144f Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/wal/00000249 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/wal/00000250 b/services/monitoring/prometheus/prometheus_db/data/wal/00000250 new file mode 100644 index 0000000000000000000000000000000000000000..6974b1fdfe7fc6cfb933d64ce817020d82c8662e Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/wal/00000250 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/wal/00000251 b/services/monitoring/prometheus/prometheus_db/data/wal/00000251 new file mode 100644 index 0000000000000000000000000000000000000000..cddf5dd82b7e4fcf972e137364d70dcb3884f4fa Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/wal/00000251 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/wal/00000252 b/services/monitoring/prometheus/prometheus_db/data/wal/00000252 new file mode 100644 index 0000000000000000000000000000000000000000..8fe8cace7f3674ac08fc367179a81ee1891ee6e8 Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/wal/00000252 differ diff --git a/services/monitoring/prometheus/prometheus_db/data/wal/checkpoint.00000248/00000000 b/services/monitoring/prometheus/prometheus_db/data/wal/checkpoint.00000248/00000000 new file mode 100644 index 0000000000000000000000000000000000000000..bd5980715e545c437268b226f0ab272030301eff Binary files /dev/null and b/services/monitoring/prometheus/prometheus_db/data/wal/checkpoint.00000248/00000000 differ diff --git a/services/monitoring/prometheus/prometheus_db/prometheus.yml b/services/monitoring/prometheus/prometheus_db/prometheus.yml new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/services/monitoring/tempo/tempo-data/.keep b/services/monitoring/tempo/tempo-data/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/bloom-0 b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/bloom-0 new file mode 100644 index 0000000000000000000000000000000000000000..74f5867c9dac45c8d6f18e231aaf6f34bf26c75f Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/bloom-0 differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/data.parquet b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/data.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9c357d6719200a7ea2eb635df9c5d96d65e57977 Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/data.parquet differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/index b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/index new file mode 100644 index 0000000000000000000000000000000000000000..3f84243a59c09013f1c55d68dd8c708875c781ba --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/index @@ -0,0 +1 @@ 
+{"rowGroups":["/mzEdGWlxwxfAkAiAcO0lA=="]} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/meta.compacted.json b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/meta.compacted.json new file mode 100644 index 0000000000000000000000000000000000000000..17f13d2105c478b544ab68f44d33d273f60a7e8e --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/1af0217d-4c51-45cc-8666-8d3905037270/meta.compacted.json @@ -0,0 +1 @@ +{"format":"vParquet3","blockID":"1af0217d-4c51-45cc-8666-8d3905037270","minID":"BUyRL6KsYdeKXOv3O/6oHw==","maxID":"/mzEdGWlxwxfAkAiAcO0lA==","tenantID":"single-tenant","startTime":"2024-04-18T15:13:03Z","endTime":"2024-04-18T15:19:11Z","totalObjects":42,"size":24160,"compactionLevel":1,"encoding":"none","indexPageSize":0,"totalRecords":1,"dataEncoding":"","bloomShards":1,"footerSize":11754} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/bloom-0 b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/bloom-0 new file mode 100644 index 0000000000000000000000000000000000000000..d0717dad6b0193ce19f69c70b02fce34b26f5c92 Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/bloom-0 differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/data.parquet b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/data.parquet new file mode 100644 index 0000000000000000000000000000000000000000..12c980dbdf48675bd8d65eb24d7517806654fb47 Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/data.parquet differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/index b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/index new file mode 100644 index 0000000000000000000000000000000000000000..3c629467f86857f46f78cba083f803a5bd8a8e57 --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/index @@ -0,0 +1 @@ +{"rowGroups":["56VgHse1LH0lc6AVUT+rdw=="]} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/meta.compacted.json b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/meta.compacted.json new file mode 100644 index 0000000000000000000000000000000000000000..2fa72acdd17a766093406b4a7df5b16439881e72 --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/555c3df1-a215-4c31-8c48-67847ea0690c/meta.compacted.json @@ -0,0 +1 @@ +{"format":"vParquet3","blockID":"555c3df1-a215-4c31-8c48-67847ea0690c","minID":"EsaWvRUC0PPk4KekLVmvsg==","maxID":"56VgHse1LH0lc6AVUT+rdw==","tenantID":"single-tenant","startTime":"2024-04-18T15:13:03Z","endTime":"2024-04-18T15:13:08Z","totalObjects":12,"size":20432,"compactionLevel":0,"encoding":"none","indexPageSize":0,"totalRecords":1,"dataEncoding":"","bloomShards":1,"footerSize":11618} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/bloom-0 
b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/bloom-0 new file mode 100644 index 0000000000000000000000000000000000000000..9bffb4effa62bf030769da9fc79988e97e2cb361 Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/bloom-0 differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/data.parquet b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/data.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8ebb4061c29a03b50e24fbc65e353df49e722abf Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/data.parquet differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/index b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/index new file mode 100644 index 0000000000000000000000000000000000000000..3f84243a59c09013f1c55d68dd8c708875c781ba --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/index @@ -0,0 +1 @@ +{"rowGroups":["/mzEdGWlxwxfAkAiAcO0lA=="]} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/meta.compacted.json b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/meta.compacted.json new file mode 100644 index 0000000000000000000000000000000000000000..6b25c49e8c3ff0ad79cdee6715a083484ce539a6 --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/5be550d3-0d77-41ea-a950-080fd772c13f/meta.compacted.json @@ -0,0 +1 @@ +{"format":"vParquet3","blockID":"5be550d3-0d77-41ea-a950-080fd772c13f","minID":"BUyRL6KsYdeKXOv3O/6oHw==","maxID":"/mzEdGWlxwxfAkAiAcO0lA==","tenantID":"single-tenant","startTime":"2024-04-18T15:18:53Z","endTime":"2024-04-18T15:19:11Z","totalObjects":30,"size":22097,"compactionLevel":0,"encoding":"none","indexPageSize":0,"totalRecords":1,"dataEncoding":"","bloomShards":1,"footerSize":11704} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/bloom-0 b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/bloom-0 new file mode 100644 index 0000000000000000000000000000000000000000..5ee9467db2663a95410c6c80fca73e5d09aee619 Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/bloom-0 differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/data.parquet b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/data.parquet new file mode 100644 index 0000000000000000000000000000000000000000..10fd45a41e9ee75fb061bd6392fde58b4a4682ef Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/data.parquet differ diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/index b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/index new file mode 100644 index 
0000000000000000000000000000000000000000..3ba2a72fe26e18ca079a448b9cb9f6204b9dade2 --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/index @@ -0,0 +1 @@ +{"rowGroups":["+sDcdjEBMHVWJKxkHdkyiQ=="]} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/meta.compacted.json b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/meta.compacted.json new file mode 100644 index 0000000000000000000000000000000000000000..ee0e984f93ae33f22d425f3596c04f6323d86242 --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/single-tenant/64151eb3-1928-484a-b510-cec670a873e9/meta.compacted.json @@ -0,0 +1 @@ +{"format":"vParquet3","blockID":"64151eb3-1928-484a-b510-cec670a873e9","minID":"ApN785trkRliSAEQjQzIXQ==","maxID":"+sDcdjEBMHVWJKxkHdkyiQ==","tenantID":"single-tenant","startTime":"2024-02-06T09:02:01Z","endTime":"2024-02-06T09:02:23Z","totalObjects":34,"size":23588,"compactionLevel":0,"encoding":"none","indexPageSize":0,"totalRecords":1,"dataEncoding":"","bloomShards":1,"footerSize":11760} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/blocks/single-tenant/index.json.gz b/services/monitoring/tempo/tempo-data/blocks/single-tenant/index.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2ec4a4185068b2d3a0eeef0657068f7c3d93d28c Binary files /dev/null and b/services/monitoring/tempo/tempo-data/blocks/single-tenant/index.json.gz differ diff --git a/services/monitoring/tempo/tempo-data/blocks/tempo_cluster_seed.json b/services/monitoring/tempo/tempo-data/blocks/tempo_cluster_seed.json new file mode 100644 index 0000000000000000000000000000000000000000..75666366a5a1eef4adb5d313d06d1433a74cf92b --- /dev/null +++ b/services/monitoring/tempo/tempo-data/blocks/tempo_cluster_seed.json @@ -0,0 +1 @@ +{"UID":"35359dcb-8d3d-444c-a217-4c200ae8ffd2","created_at":"2023-11-27T10:46:24.207602042Z","version":{"version":"r124-c00e7ef","revision":"c00e7ef11","branch":"r124","buildUser":"","buildDate":"","goVersion":"go1.21.3"}} \ No newline at end of file diff --git a/services/monitoring/tempo/tempo-data/wal/780e2b0c-10c8-4c9e-84a5-a9befefc50ab+single-tenant+vParquet3/0000000001 b/services/monitoring/tempo/tempo-data/wal/780e2b0c-10c8-4c9e-84a5-a9befefc50ab+single-tenant+vParquet3/0000000001 new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/monitoring/tempo/tempo.yaml b/services/monitoring/tempo/tempo.yaml similarity index 100% rename from monitoring/tempo/tempo.yaml rename to services/monitoring/tempo/tempo.yaml diff --git a/services/run.sh b/services/run.sh index cf500a936202a281b3232992257e44fc582c9bb5..fa10d8ada45ff558f15509550fc68247bb05441c 100755 --- a/services/run.sh +++ b/services/run.sh @@ -69,7 +69,7 @@ if [ "$MONITORING_STATE" == "true" ] ; then echo '***Monitoring set as true***' echo '***Creating Monitoring stack***' - DUID=$DUID DGID=$DGID docker compose -f "../monitoring/docker-compose.yml" up --detach + DUID=$DUID DGID=$DGID docker compose -f "./monitoring/docker-compose.yml" up --detach status=$? if [ $status -eq 0 ]; then echo "*** Monitoring Stack Runing ***"
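For reference, the hunk above only changes where `services/run.sh` looks for the monitoring compose file, following the move of `monitoring/` under `services/`. The snippet below is a minimal sketch of the equivalent manual invocation, assuming the compose file now sits at `services/monitoring/docker-compose.yml` (per the renames in this diff) and that `DUID`/`DGID` are passed, as in the original script, to keep the Prometheus and Grafana bind-mount data writable by the invoking user rather than root.

```
# Sketch: start the monitoring stack by hand from the services/ directory,
# mirroring the updated line in services/run.sh (paths taken from this diff).
cd services

# Export the current user's UID/GID so bind-mounted data stays user-owned.
DUID=$(id -u) DGID=$(id -g) \
  docker compose -f "./monitoring/docker-compose.yml" up --detach

# Tear the stack down the same way when finished:
# DUID=$(id -u) DGID=$(id -g) docker compose -f "./monitoring/docker-compose.yml" down
```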