From 65444b9062bbcdb635c08b3e7ad56c1c57cdc4c6 Mon Sep 17 00:00:00 2001 From: gifrerenom <lluis.gifre@cttc.es> Date: Wed, 20 Mar 2024 12:22:28 +0000 Subject: [PATCH] Manifests: - Recovered default log levels to INFO - Corrected Formatting of manifest files --- manifests/bgpls_speakerservice.yaml | 60 ++-- manifests/cachingservice.yaml | 50 ++-- manifests/contextservice.yaml | 94 +++---- manifests/dbscanservingservice.yaml | 68 ++--- manifests/deviceservice.yaml | 70 ++--- manifests/dltservice.yaml | 118 ++++---- manifests/e2e_orchestratorservice.yaml | 68 ++--- manifests/forecasterservice.yaml | 86 +++--- manifests/interdomainservice.yaml | 64 ++--- manifests/l3_attackmitigatorservice.yaml | 86 +++--- .../l3_centralizedattackdetectorservice.yaml | 102 +++---- .../l3_distributedattackdetectorservice.yaml | 48 ++-- manifests/load_generatorservice.yaml | 60 ++-- manifests/mock_blockchain.yaml | 50 ++-- manifests/monitoringservice.yaml | 66 ++--- manifests/nbiservice.yaml | 70 ++--- manifests/nginx_ingress_http.yaml | 74 ++--- manifests/opticalattackdetectorservice.yaml | 78 +++--- manifests/opticalattackmanagerservice.yaml | 66 ++--- manifests/opticalattackmitigatorservice.yaml | 68 ++--- manifests/opticalcontrollerservice.yaml | 60 ++-- manifests/pathcompservice.yaml | 132 ++++----- manifests/policyservice.yaml | 12 +- manifests/prometheus.yaml | 31 ++- manifests/servicemonitors.yaml | 256 +++++++++--------- manifests/serviceservice.yaml | 72 ++--- manifests/sliceservice.yaml | 82 +++--- manifests/teservice.yaml | 78 +++--- manifests/webuiservice.yaml | 146 +++++----- manifests/ztpservice.yaml | 12 +- 30 files changed, 1164 insertions(+), 1163 deletions(-) diff --git a/manifests/bgpls_speakerservice.yaml b/manifests/bgpls_speakerservice.yaml index aa985d13e..bda0d4cf5 100644 --- a/manifests/bgpls_speakerservice.yaml +++ b/manifests/bgpls_speakerservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: localhost:32000/tfs/bgpls_speaker:dev - imagePullPolicy: Always - ports: - - containerPort: 20030 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:20030"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:20030"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: localhost:32000/tfs/bgpls_speaker:dev + imagePullPolicy: Always + ports: + - containerPort: 20030 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:20030"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:20030"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -62,11 +62,11 @@ spec: selector: app: bgpls-speakerservice ports: - - name: grpc - protocol: TCP - port: 20030 - targetPort: 20030 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 20030 + targetPort: 20030 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/cachingservice.yaml b/manifests/cachingservice.yaml index be8fced49..ee6e02e00 100644 --- a/manifests/cachingservice.yaml +++ b/manifests/cachingservice.yaml @@ -27,28 +27,28 @@ spec: app: cachingservice spec: containers: - - name: redis - image: redis:7.0-alpine - env: - - name: REDIS_PASSWORD - valueFrom: - 
secretKeyRef: - name: redis-secrets - key: REDIS_PASSWORD - ports: - - containerPort: 6379 - name: client - command: ["redis-server"] - args: - - --requirepass - - $(REDIS_PASSWORD) - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi + - name: redis + image: redis:7.0-alpine + env: + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + ports: + - containerPort: 6379 + name: client + command: ["redis-server"] + args: + - --requirepass + - $(REDIS_PASSWORD) + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service @@ -59,6 +59,6 @@ spec: selector: app: cachingservice ports: - - name: redis - port: 6379 - targetPort: 6379 + - name: redis + port: 6379 + targetPort: 6379 diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 5d0e6102c..c7dc59625 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -30,39 +30,39 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/context:latest - imagePullPolicy: Always - ports: - - containerPort: 1010 - - containerPort: 9192 - env: - - name: MB_BACKEND - value: "nats" - - name: LOG_LEVEL - value: "DEBUG" - - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY - value: "FALSE" - - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY - value: "FALSE" - envFrom: - - secretRef: - name: crdb-data - - secretRef: - name: nats-data - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:1010"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:1010"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/context:latest + imagePullPolicy: Always + ports: + - containerPort: 1010 + - containerPort: 9192 + env: + - name: MB_BACKEND + value: "nats" + - name: LOG_LEVEL + value: "INFO" + - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY + value: "FALSE" + - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY + value: "FALSE" + envFrom: + - secretRef: + name: crdb-data + - secretRef: + name: nats-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:1010"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:1010"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -75,14 +75,14 @@ spec: selector: app: contextservice ports: - - name: grpc - protocol: TCP - port: 1010 - targetPort: 1010 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 1010 + targetPort: 1010 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -96,12 +96,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/dbscanservingservice.yaml b/manifests/dbscanservingservice.yaml index b5b8fc437..2bdd1c3fa 100644 --- a/manifests/dbscanservingservice.yaml +++ b/manifests/dbscanservingservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: 
labs.etsi.org:5050/tfs/controller/dbscanserving:latest - imagePullPolicy: Always - ports: - - containerPort: 10008 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10008"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10008"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest + imagePullPolicy: Always + ports: + - containerPort: 10008 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10008"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10008"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,12 +61,12 @@ spec: selector: app: dbscanservingservice ports: - - name: grpc - port: 10008 - targetPort: 10008 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10008 + targetPort: 10008 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -80,12 +80,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 7f7885daf..fdc3cea02 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -31,33 +31,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/device:latest - imagePullPolicy: Always - ports: - - containerPort: 2020 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - startupProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:2020"] - failureThreshold: 30 - periodSeconds: 1 - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:2020"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:2020"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/device:latest + imagePullPolicy: Always + ports: + - containerPort: 2020 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + startupProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:2020"] + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:2020"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:2020"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -70,11 +70,11 @@ spec: selector: app: deviceservice ports: - - name: grpc - protocol: TCP - port: 2020 - targetPort: 2020 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 2020 + targetPort: 2020 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 5e8f745f7..f905749b4 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -27,57 +27,57 @@ spec: spec: terminationGracePeriodSeconds: 5 
containers: - - name: connector - image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest - imagePullPolicy: Always - ports: - - containerPort: 8080 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - ## for debug purposes - #- name: DLT_GATEWAY_HOST - # value: "mock-blockchain.tfs-bchain.svc.cluster.local" - #- name: DLT_GATEWAY_PORT - # value: "50051" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:8080"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:8080"] - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi - - name: gateway - image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest - imagePullPolicy: Always - ports: - - containerPort: 50051 - #readinessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - #livenessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - resources: - requests: - cpu: 200m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: connector + image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + ## for debug purposes + #- name: DLT_GATEWAY_HOST + # value: "mock-blockchain.tfs-bchain.svc.cluster.local" + #- name: DLT_GATEWAY_PORT + # value: "50051" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:8080"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:8080"] + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi + - name: gateway + image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest + imagePullPolicy: Always + ports: + - containerPort: 50051 + #readinessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + #livenessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -90,11 +90,11 @@ spec: selector: app: dltservice ports: - - name: grpc - protocol: TCP - port: 8080 - targetPort: 8080 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 8080 + targetPort: 8080 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/e2e_orchestratorservice.yaml b/manifests/e2e_orchestratorservice.yaml index 899e17fff..90d377711 100644 --- a/manifests/e2e_orchestratorservice.yaml +++ b/manifests/e2e_orchestratorservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/e2e_orchestrator:latest - imagePullPolicy: Always - ports: - - containerPort: 10050 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10050"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10050"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/e2e_orchestrator:latest + imagePullPolicy: Always + ports: + - containerPort: 10050 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10050"] 
+ livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10050"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,12 +61,12 @@ spec: selector: app: e2e-orchestratorservice ports: - - name: grpc - port: 10050 - targetPort: 10050 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10050 + targetPort: 10050 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -80,12 +80,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/forecasterservice.yaml b/manifests/forecasterservice.yaml index 55d4add88..d1136f617 100644 --- a/manifests/forecasterservice.yaml +++ b/manifests/forecasterservice.yaml @@ -28,35 +28,35 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/forecaster:latest - imagePullPolicy: Always - ports: - - containerPort: 10040 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: FORECAST_TO_HISTORY_RATIO - value: "10" - startupProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10040"] - failureThreshold: 30 - periodSeconds: 1 - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10040"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10040"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/forecaster:latest + imagePullPolicy: Always + ports: + - containerPort: 10040 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: FORECAST_TO_HISTORY_RATIO + value: "10" + startupProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10040"] + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10040"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10040"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -69,14 +69,14 @@ spec: selector: app: forecasterservice ports: - - name: grpc - protocol: TCP - port: 10040 - targetPort: 10040 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10040 + targetPort: 10040 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -90,12 +90,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index a23583402..ad9be3a3e 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -27,30 +27,30 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/interdomain:latest - imagePullPolicy: Always - ports: - - containerPort: 10010 - - containerPort: 
9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: TOPOLOGY_ABSTRACTOR - value: "DISABLE" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10010"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10010"] - resources: - requests: - cpu: 250m - memory: 64Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/interdomain:latest + imagePullPolicy: Always + ports: + - containerPort: 10010 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: TOPOLOGY_ABSTRACTOR + value: "DISABLE" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10010"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10010"] + resources: + requests: + cpu: 250m + memory: 64Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -63,11 +63,11 @@ spec: selector: app: interdomainservice ports: - - name: grpc - protocol: TCP - port: 10010 - targetPort: 10010 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10010 + targetPort: 10010 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/l3_attackmitigatorservice.yaml b/manifests/l3_attackmitigatorservice.yaml index ee97d2c92..973b805bd 100644 --- a/manifests/l3_attackmitigatorservice.yaml +++ b/manifests/l3_attackmitigatorservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/l3_attackmitigator:latest - imagePullPolicy: Always - ports: - - containerPort: 10002 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10002"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10002"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/l3_attackmitigator:latest + imagePullPolicy: Always + ports: + - containerPort: 10002 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10002"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10002"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,13 +61,13 @@ spec: selector: app: l3-attackmitigatorservice ports: - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 - - name: grpc - port: 10002 - targetPort: 10002 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 + - name: grpc + port: 10002 + targetPort: 10002 --- apiVersion: autoscaling/v2 @@ -82,12 +82,12 @@ spec: minReplicas: 1 maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 behavior: scaleDown: stabilizationWindowSeconds: 120 @@ -100,9 +100,9 @@ metadata: labels: app: l3-attackmitigatorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of 
Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -112,11 +112,11 @@ spec: app: l3-attackmitigatorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running diff --git a/manifests/l3_centralizedattackdetectorservice.yaml b/manifests/l3_centralizedattackdetectorservice.yaml index 8a3be69b6..98c5f9b41 100644 --- a/manifests/l3_centralizedattackdetectorservice.yaml +++ b/manifests/l3_centralizedattackdetectorservice.yaml @@ -27,36 +27,36 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/l3_centralizedattackdetector:latest - imagePullPolicy: Always - ports: - - containerPort: 10001 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - - name: BATCH_SIZE - value: "256" - - name: CAD_CLASSIFICATION_THRESHOLD - value: "0.5" - - name: MONITORED_KPIS_TIME_INTERVAL_AGG - value: "60" - - name: TEST_ML_MODEL - value: "0" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10001"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10001"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/l3_centralizedattackdetector:latest + imagePullPolicy: Always + ports: + - containerPort: 10001 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: BATCH_SIZE + value: "256" + - name: CAD_CLASSIFICATION_THRESHOLD + value: "0.5" + - name: MONITORED_KPIS_TIME_INTERVAL_AGG + value: "60" + - name: TEST_ML_MODEL + value: "0" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10001"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10001"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -69,13 +69,13 @@ spec: selector: app: l3-centralizedattackdetectorservice ports: - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 - - name: grpc - port: 10001 - targetPort: 10001 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 + - name: grpc + port: 10001 + targetPort: 10001 --- apiVersion: autoscaling/v2 @@ -90,12 +90,12 @@ spec: minReplicas: 1 maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 behavior: scaleDown: stabilizationWindowSeconds: 120 @@ -107,9 +107,9 @@ metadata: labels: app: l3-centralizedattackdetectorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -119,11 +119,11 @@ spec: app: l3-centralizedattackdetectorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running diff --git a/manifests/l3_distributedattackdetectorservice.yaml b/manifests/l3_distributedattackdetectorservice.yaml index b363c1d5c..bf72b5cd0 100644 --- a/manifests/l3_distributedattackdetectorservice.yaml +++ b/manifests/l3_distributedattackdetectorservice.yaml @@ -27,27 +27,27 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/l3_distributedattackdetector:latest - imagePullPolicy: Always - ports: - - containerPort: 10000 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10000"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10000"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/l3_distributedattackdetector:latest + imagePullPolicy: Always + ports: + - containerPort: 10000 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10000"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10000"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -58,6 +58,6 @@ spec: selector: app: l3-distributedattackdetectorservice ports: - - name: grpc - port: 10000 - targetPort: 10000 + - name: grpc + port: 10000 + targetPort: 10000 diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml index 7cc6f1912..bda284ebd 100644 --- a/manifests/load_generatorservice.yaml +++ b/manifests/load_generatorservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/load_generator:latest - imagePullPolicy: Always - ports: - - containerPort: 50052 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50052"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50052"] - resources: - requests: - cpu: 256m - memory: 64Mi - limits: - cpu: 512m - memory: 128Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/load_generator:latest + imagePullPolicy: Always + ports: + - containerPort: 50052 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50052"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50052"] + resources: + requests: + cpu: 256m + memory: 64Mi + limits: + cpu: 512m + memory: 128Mi --- apiVersion: v1 kind: Service @@ -62,11 +62,11 @@ spec: selector: app: load-generatorservice ports: - - name: grpc - protocol: TCP - port: 50052 - targetPort: 50052 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 50052 + targetPort: 50052 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 
9192 diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml index 1093610f8..806432be6 100644 --- a/manifests/mock_blockchain.yaml +++ b/manifests/mock_blockchain.yaml @@ -27,27 +27,27 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/mock_blockchain:latest - imagePullPolicy: Always - ports: - - containerPort: 50051 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50051"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50051"] - resources: - requests: - cpu: 100m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/mock_blockchain:latest + imagePullPolicy: Always + ports: + - containerPort: 50051 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50051"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50051"] + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -58,7 +58,7 @@ spec: selector: app: mock-blockchain ports: - - name: grpc - protocol: TCP - port: 50051 - targetPort: 50051 + - name: grpc + protocol: TCP + port: 50051 + targetPort: 50051 diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml index 1a2059395..3a4d43cd9 100644 --- a/manifests/monitoringservice.yaml +++ b/manifests/monitoringservice.yaml @@ -28,31 +28,31 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/monitoring:latest - imagePullPolicy: Always - ports: - - containerPort: 7070 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - envFrom: - - secretRef: - name: qdb-data - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:7070"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:7070"] - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/monitoring:latest + imagePullPolicy: Always + ports: + - containerPort: 7070 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: qdb-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:7070"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:7070"] + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -65,11 +65,11 @@ spec: selector: app: monitoringservice ports: - - name: grpc - protocol: TCP - port: 7070 - targetPort: 7070 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 7070 + targetPort: 7070 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index f5477aeb4..0a3bd1ea6 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -28,29 +28,29 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/nbi:latest - imagePullPolicy: Always - ports: - - containerPort: 8080 - - containerPort: 9090 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:9090"] - livenessProbe: - 
exec: - command: ["/bin/grpc_health_probe", "-addr=:9090"] - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/nbi:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + - containerPort: 9090 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:9090"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:9090"] + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service @@ -63,15 +63,15 @@ spec: selector: app: nbiservice ports: - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - - name: grpc - protocol: TCP - port: 9090 - targetPort: 9090 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + - name: grpc + protocol: TCP + port: 9090 + targetPort: 9090 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index e8e8a80e4..91440fb7a 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -20,40 +20,40 @@ metadata: nginx.ingress.kubernetes.io/rewrite-target: /$2 spec: rules: - - http: - paths: - - path: /webui(/|$)(.*) - pathType: Prefix - backend: - service: - name: webuiservice - port: - number: 8004 - - path: /grafana(/|$)(.*) - pathType: Prefix - backend: - service: - name: webuiservice - port: - number: 3000 - - path: /()(restconf/.*) - pathType: Prefix - backend: - service: - name: nbiservice - port: - number: 8080 - - path: /()(debug-api/.*) - pathType: Prefix - backend: - service: - name: nbiservice - port: - number: 8080 - - path: /()(bmw/.*) - pathType: Prefix - backend: - service: - name: nbiservice - port: - number: 8080 + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /()(debug-api/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /()(bmw/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 diff --git a/manifests/opticalattackdetectorservice.yaml b/manifests/opticalattackdetectorservice.yaml index 197c23dd2..11fd62b61 100644 --- a/manifests/opticalattackdetectorservice.yaml +++ b/manifests/opticalattackdetectorservice.yaml @@ -27,33 +27,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/opticalattackdetector:latest - imagePullPolicy: Always - ports: - - containerPort: 10006 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - key: REDIS_PASSWORD - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10006"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10006"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/opticalattackdetector:latest + imagePullPolicy: Always + ports: + - containerPort: 10006 + - 
containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10006"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10006"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -66,12 +66,12 @@ spec: selector: app: opticalattackdetectorservice ports: - - name: grpc - port: 10006 - targetPort: 10006 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10006 + targetPort: 10006 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -85,12 +85,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/opticalattackmanagerservice.yaml b/manifests/opticalattackmanagerservice.yaml index f9838bcbb..4e01d76ac 100644 --- a/manifests/opticalattackmanagerservice.yaml +++ b/manifests/opticalattackmanagerservice.yaml @@ -28,33 +28,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/opticalattackmanager:latest - imagePullPolicy: Always - ports: - - containerPort: 10005 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: MONITORING_INTERVAL - value: "30" - - name: OPTICALATTACKMANAGERSERVICE_LOOP_MIN_WORKERS - value: "2" # remember to align this with the resource limits - - name: OPTICALATTACKMANAGERSERVICE_LOOP_MAX_WORKERS - value: "10" # remember to align this with the resource limits - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - key: REDIS_PASSWORD - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 10000m - memory: 10240Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/opticalattackmanager:latest + imagePullPolicy: Always + ports: + - containerPort: 10005 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: MONITORING_INTERVAL + value: "30" + - name: OPTICALATTACKMANAGERSERVICE_LOOP_MIN_WORKERS + value: "2" # remember to align this with the resource limits + - name: OPTICALATTACKMANAGERSERVICE_LOOP_MAX_WORKERS + value: "10" # remember to align this with the resource limits + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 10000m + memory: 10240Mi --- apiVersion: v1 kind: Service @@ -67,9 +67,9 @@ spec: selector: app: opticalattackmanagerservice ports: - - name: grpc - port: 10005 - targetPort: 10005 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10005 + targetPort: 10005 + - name: metrics + port: 9192 + targetPort: 9192 diff --git a/manifests/opticalattackmitigatorservice.yaml b/manifests/opticalattackmitigatorservice.yaml index 4d148b347..255e0fd86 100644 --- a/manifests/opticalattackmitigatorservice.yaml +++ b/manifests/opticalattackmitigatorservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/opticalattackmitigator:latest - imagePullPolicy: Always - ports: - - 
containerPort: 10007 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10007"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10007"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/opticalattackmitigator:latest + imagePullPolicy: Always + ports: + - containerPort: 10007 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10007"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10007"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,12 +61,12 @@ spec: selector: app: opticalattackmitigatorservice ports: - - name: grpc - port: 10007 - targetPort: 10007 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10007 + targetPort: 10007 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -80,12 +80,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/opticalcontrollerservice.yaml b/manifests/opticalcontrollerservice.yaml index 4b677ee4f..f2351720c 100644 --- a/manifests/opticalcontrollerservice.yaml +++ b/manifests/opticalcontrollerservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: localhost:32000/tfs/opticalcontroller:dev - imagePullPolicy: Never - ports: - - containerPort: 10060 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - #readinessProbe: - # exec: - # command: ["/bin/grpc_health_probe", "-addr=:10060"] - #livenessProbe: - # exec: - # command: ["/bin/grpc_health_probe", "-addr=:10060"] - resources: - requests: - cpu: 500m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: localhost:32000/tfs/opticalcontroller:dev + imagePullPolicy: Never + ports: + - containerPort: 10060 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + #readinessProbe: + # exec: + # command: ["/bin/grpc_health_probe", "-addr=:10060"] + #livenessProbe: + # exec: + # command: ["/bin/grpc_health_probe", "-addr=:10060"] + resources: + requests: + cpu: 500m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -62,11 +62,11 @@ spec: selector: app: opticalcontrollerservice ports: - - name: grpc - protocol: TCP - port: 10060 - targetPort: 10060 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10060 + targetPort: 10060 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 4beadea8f..f017e6940 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -28,54 +28,54 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: frontend - image: labs.etsi.org:5050/tfs/controller/pathcomp-frontend:latest - imagePullPolicy: Always - ports: - - containerPort: 10020 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - 
- name: ENABLE_FORECASTER - value: "YES" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10020"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10020"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi - - name: backend - image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest - imagePullPolicy: Always - ports: - - containerPort: 8081 - #readinessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - #livenessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: frontend + image: labs.etsi.org:5050/tfs/controller/pathcomp-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 10020 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: ENABLE_FORECASTER + value: "NO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10020"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10020"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 8081 + #readinessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + #livenessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -88,18 +88,18 @@ spec: selector: app: pathcompservice ports: - - name: grpc - protocol: TCP - port: 10020 - targetPort: 10020 - - name: http - protocol: TCP - port: 8081 - targetPort: 8081 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10020 + targetPort: 10020 + - name: http + protocol: TCP + port: 8081 + targetPort: 8081 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -113,12 +113,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml index b34331724..b93eeda03 100644 --- a/manifests/policyservice.yaml +++ b/manifests/policyservice.yaml @@ -121,9 +121,9 @@ spec: minReplicas: 1 maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/manifests/prometheus.yaml b/manifests/prometheus.yaml index 43a766b6e..ec1ffb0c5 100644 --- a/manifests/prometheus.yaml +++ b/manifests/prometheus.yaml @@ -60,18 +60,18 @@ spec: spec: #serviceAccountName: prometheus containers: - - name: prometheus - image: prom/prometheus:v2.28.1 - ports: - - containerPort: 9090 - volumeMounts: - - name: prometheus-config-volume - mountPath: /etc/prometheus/prometheus.yml - subPath: prometheus.yml + - name: prometheus + image: prom/prometheus:v2.28.1 + ports: + - 
containerPort: 9090 + volumeMounts: + - name: prometheus-config-volume + mountPath: /etc/prometheus/prometheus.yml + subPath: prometheus.yml volumes: - - name: prometheus-config-volume - configMap: - name: prometheus-config + - name: prometheus-config-volume + configMap: + name: prometheus-config restartPolicy: Always --- apiVersion: v1 @@ -85,8 +85,9 @@ spec: selector: app: prometheus ports: - - name: http - protocol: TCP - port: 9090 - targetPort: 9090 + - name: http + protocol: TCP + port: 9090 + targetPort: 9090 --- + diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml index 1f7f3322d..ccfe774b3 100644 --- a/manifests/servicemonitors.yaml +++ b/manifests/servicemonitors.yaml @@ -20,9 +20,9 @@ metadata: labels: app: contextservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -32,14 +32,14 @@ spec: app: contextservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -49,9 +49,9 @@ metadata: labels: app: deviceservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -61,14 +61,14 @@ spec: app: deviceservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -78,9 +78,9 @@ metadata: labels: app: serviceservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -90,14 +90,14 @@ spec: app: serviceservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -107,9 +107,9 @@ metadata: labels: app: sliceservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -119,14 +119,14 @@ spec: app: sliceservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -136,9 +136,9 @@ metadata: labels: app: pathcompservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -148,14 +148,14 @@ spec: app: pathcompservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -165,9 +165,9 @@ metadata: labels: app: monitoringservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -177,14 +177,14 @@ spec: app: monitoringservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -194,9 +194,9 @@ metadata: labels: app: dltservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -206,14 +206,14 @@ spec: app: dltservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -223,9 +223,9 @@ metadata: labels: app: interdomainservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -235,14 +235,14 @@ spec: app: interdomainservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -252,9 +252,9 @@ metadata: labels: app: policyservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -264,14 +264,14 @@ spec: app: policyservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /q/metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /q/metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -281,9 +281,9 @@ metadata: labels: app: ztpservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -293,14 +293,14 @@ spec: app: ztpservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /q/metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /q/metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -310,9 +310,9 @@ metadata: labels: app: nbiservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -322,14 +322,14 @@ spec: app: nbiservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -339,9 +339,9 @@ metadata: labels: app: load-generatorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -351,14 +351,14 @@ spec: app: load-generatorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -368,9 +368,9 @@ metadata: labels: app: dbscanservingservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -380,14 +380,14 @@ spec: app: dbscanservingservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -397,9 +397,9 @@ metadata: labels: app: opticalattackmitigatorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -409,14 +409,14 @@ spec: app: opticalattackmitigatorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -426,9 +426,9 @@ metadata: labels: app: opticalattackdetectorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -438,14 +438,14 @@ spec: app: opticalattackdetectorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -455,9 +455,9 @@ metadata: labels: app: opticalattackmanagerservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -467,11 +467,11 @@ spec: app: opticalattackmanagerservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 3865fd6c0..2fb7ebb87 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/service:latest - imagePullPolicy: Always - ports: - - containerPort: 3030 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:3030"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:3030"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/service:latest + imagePullPolicy: Always + ports: + - containerPort: 3030 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:3030"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:3030"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -62,14 +62,14 @@ spec: selector: app: serviceservice ports: - - name: grpc - protocol: TCP - port: 3030 - targetPort: 3030 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 3030 + targetPort: 3030 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -83,12 +83,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff 
--git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 61f5b1d21..0daa8e70f 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -28,33 +28,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/slice:latest - imagePullPolicy: Always - ports: - - containerPort: 4040 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - - name: SLICE_GROUPING - value: "DISABLE" - envFrom: - - secretRef: - name: qdb-data - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:4040"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:4040"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/slice:latest + imagePullPolicy: Always + ports: + - containerPort: 4040 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: SLICE_GROUPING + value: "DISABLE" + envFrom: + - secretRef: + name: qdb-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:4040"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:4040"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -67,14 +67,14 @@ spec: selector: app: sliceservice ports: - - name: grpc - protocol: TCP - port: 4040 - targetPort: 4040 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 4040 + targetPort: 4040 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -88,12 +88,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/teservice.yaml b/manifests/teservice.yaml index 15f1619df..ec8f2e3d6 100644 --- a/manifests/teservice.yaml +++ b/manifests/teservice.yaml @@ -30,37 +30,37 @@ spec: terminationGracePeriodSeconds: 5 shareProcessNamespace: true containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/te:latest - imagePullPolicy: Always - ports: - - containerPort: 10030 - env: - - name: ERLANG_LOGGER_LEVEL - value: "debug" - - name: ERLANG_COOKIE - value: "tfte-unsafe-cookie" - - name: ERLANG_NODE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: ERLANG_NODE_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - readinessProbe: - exec: - command: ["/tfte/bin/tfte", "status"] - livenessProbe: - exec: - command: ["/tfte/bin/tfte", "status"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/te:latest + imagePullPolicy: Always + ports: + - containerPort: 10030 + env: + - name: ERLANG_LOGGER_LEVEL + value: "debug" + - name: ERLANG_COOKIE + value: "tfte-unsafe-cookie" + - name: ERLANG_NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ERLANG_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + readinessProbe: + exec: + command: ["/tfte/bin/tfte", "status"] + livenessProbe: + exec: + command: ["/tfte/bin/tfte", "status"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- 
apiVersion: v1 kind: Service @@ -71,11 +71,11 @@ spec: selector: app: teservice ports: - - name: grpc - protocol: TCP - port: 10030 - targetPort: 10030 - - name: pcep - protocol: TCP - port: 4189 - targetPort: 4189 + - name: grpc + protocol: TCP + port: 10030 + targetPort: 10030 + - name: pcep + protocol: TCP + port: 4189 + targetPort: 4189 diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 43caa9f04..bb2573c45 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -32,73 +32,73 @@ spec: supplementalGroups: - 0 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/webui:latest - imagePullPolicy: Always - ports: - - containerPort: 8004 - env: - - name: LOG_LEVEL - value: "INFO" - - name: WEBUISERVICE_SERVICE_BASEURL_HTTP - value: "/webui/" - readinessProbe: - httpGet: - path: /healthz/ready - port: 8004 - initialDelaySeconds: 5 - timeoutSeconds: 1 - livenessProbe: - httpGet: - path: /healthz/live - port: 8004 - initialDelaySeconds: 5 - timeoutSeconds: 1 - resources: - requests: - cpu: 50m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi - - name: grafana - image: grafana/grafana:8.5.22 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 3000 - name: http-grafana - protocol: TCP - env: - - name: GF_SERVER_ROOT_URL - value: "http://0.0.0.0:3000/grafana/" - - name: GF_SERVER_SERVE_FROM_SUB_PATH - value: "true" - readinessProbe: - failureThreshold: 60 - httpGet: - #path: /robots.txt - path: /login - port: 3000 - scheme: HTTP - initialDelaySeconds: 1 - periodSeconds: 1 - successThreshold: 1 - timeoutSeconds: 2 - livenessProbe: - failureThreshold: 60 - initialDelaySeconds: 1 - periodSeconds: 1 - successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 1 - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 500m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/webui:latest + imagePullPolicy: Always + ports: + - containerPort: 8004 + env: + - name: LOG_LEVEL + value: "INFO" + - name: WEBUISERVICE_SERVICE_BASEURL_HTTP + value: "/webui/" + readinessProbe: + httpGet: + path: /healthz/ready + port: 8004 + initialDelaySeconds: 5 + timeoutSeconds: 1 + livenessProbe: + httpGet: + path: /healthz/live + port: 8004 + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + requests: + cpu: 50m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: grafana + image: grafana/grafana:8.5.22 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + env: + - name: GF_SERVER_ROOT_URL + value: "http://0.0.0.0:3000/grafana/" + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + readinessProbe: + failureThreshold: 60 + httpGet: + #path: /robots.txt + path: /login + port: 3000 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 1 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 60 + initialDelaySeconds: 1 + periodSeconds: 1 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 500m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -111,9 +111,9 @@ spec: selector: app: webuiservice ports: - - name: webui - port: 8004 - targetPort: 8004 - - name: grafana - port: 3000 - targetPort: 3000 + - name: webui + port: 8004 + targetPort: 8004 + - name: grafana + port: 3000 + targetPort: 3000 diff --git a/manifests/ztpservice.yaml b/manifests/ztpservice.yaml index e2be80cea..323d3c4bc 100644 --- 
a/manifests/ztpservice.yaml +++ b/manifests/ztpservice.yaml @@ -117,9 +117,9 @@ spec: minReplicas: 1 maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 -- GitLab
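
For context, the ServiceMonitor objects reformatted above all follow the same pattern: each one selects its target Service by the app label, scrapes the named metrics port over HTTP every 5 seconds (at /metrics, or /q/metrics for the Quarkus-based policy and ztp services), and is restricted to the tfs namespace. A minimal sketch of that pattern follows. The name exampleservice, the release value prom, and the monitoring namespace are placeholders only hinted at by the in-file comments, not values defined in this patch; they must be adjusted to match the labels expected by the running Prometheus instance, as the comments in the manifests themselves warn.

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: exampleservice-metric   # placeholder name, not taken from this patch
  namespace: monitoring         # assumption: namespace watched by the Prometheus operator
  labels:
    app: exampleservice
    #release: prom              # uncomment and set to the actual Prometheus release name
spec:
  selector:
    matchLabels:
      app: exampleservice       # same app label as on the target Service
  endpoints:
    - port: metrics             # named port exposed by the target Service
      scheme: http
      path: /metrics            # /q/metrics for the Quarkus-based policy and ztp services
      interval: 5s              # scrape interval
  namespaceSelector:
    any: false
    matchNames:
      - tfs                     # namespace where the app is running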