diff --git a/manifests/bgpls_speakerservice.yaml b/manifests/bgpls_speakerservice.yaml index aa985d13ea81ad5f5aafdee4b62ca1c54915e527..bda0d4cf5e8734ebf7a1aab95179d39fd7a3d908 100644 --- a/manifests/bgpls_speakerservice.yaml +++ b/manifests/bgpls_speakerservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: localhost:32000/tfs/bgpls_speaker:dev - imagePullPolicy: Always - ports: - - containerPort: 20030 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:20030"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:20030"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: localhost:32000/tfs/bgpls_speaker:dev + imagePullPolicy: Always + ports: + - containerPort: 20030 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:20030"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:20030"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -62,11 +62,11 @@ spec: selector: app: bgpls-speakerservice ports: - - name: grpc - protocol: TCP - port: 20030 - targetPort: 20030 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 20030 + targetPort: 20030 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/cachingservice.yaml b/manifests/cachingservice.yaml index be8fced491a823aace2b7e06be3aad1f6114d245..ee6e02e00b1ee655511a941d99b1198c1f827868 100644 --- a/manifests/cachingservice.yaml +++ b/manifests/cachingservice.yaml @@ -27,28 +27,28 @@ spec: app: cachingservice spec: containers: - - name: redis - image: redis:7.0-alpine - env: - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - key: REDIS_PASSWORD - ports: - - containerPort: 6379 - name: client - command: ["redis-server"] - args: - - --requirepass - - $(REDIS_PASSWORD) - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi + - name: redis + image: redis:7.0-alpine + env: + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + ports: + - containerPort: 6379 + name: client + command: ["redis-server"] + args: + - --requirepass + - $(REDIS_PASSWORD) + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service @@ -59,6 +59,6 @@ spec: selector: app: cachingservice ports: - - name: redis - port: 6379 - targetPort: 6379 + - name: redis + port: 6379 + targetPort: 6379 diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 5d0e6102cb8cd6e0717ed97cac6dd2f77d83ef4d..c7dc59625289027e9881d99e1bf34ca2bdd622a9 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -30,39 +30,39 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/context:latest - imagePullPolicy: Always - ports: - - containerPort: 1010 - - containerPort: 9192 - env: - - name: MB_BACKEND - value: "nats" - - name: LOG_LEVEL - value: "DEBUG" - - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY - value: "FALSE" - - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY - value: "FALSE" - envFrom: - - secretRef: - name: crdb-data - - 
secretRef: - name: nats-data - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:1010"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:1010"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/context:latest + imagePullPolicy: Always + ports: + - containerPort: 1010 + - containerPort: 9192 + env: + - name: MB_BACKEND + value: "nats" + - name: LOG_LEVEL + value: "INFO" + - name: ALLOW_EXPLICIT_ADD_DEVICE_TO_TOPOLOGY + value: "FALSE" + - name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY + value: "FALSE" + envFrom: + - secretRef: + name: crdb-data + - secretRef: + name: nats-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:1010"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:1010"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -75,14 +75,14 @@ spec: selector: app: contextservice ports: - - name: grpc - protocol: TCP - port: 1010 - targetPort: 1010 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 1010 + targetPort: 1010 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -96,12 +96,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/dbscanservingservice.yaml b/manifests/dbscanservingservice.yaml index b5b8fc437a9b7f47f92defdbc96ee71a56715316..2bdd1c3faad870f5646c456f815ae790642df45b 100644 --- a/manifests/dbscanservingservice.yaml +++ b/manifests/dbscanservingservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest - imagePullPolicy: Always - ports: - - containerPort: 10008 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10008"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10008"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/dbscanserving:latest + imagePullPolicy: Always + ports: + - containerPort: 10008 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10008"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10008"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,12 +61,12 @@ spec: selector: app: dbscanservingservice ports: - - name: grpc - port: 10008 - targetPort: 10008 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10008 + targetPort: 10008 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -80,12 +80,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + 
type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 7f7885dafa8c87764bc34cb6b21122bba0e80584..fdc3cea028a181b7dafbcfa6f3ef4e798939438e 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -31,33 +31,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/device:latest - imagePullPolicy: Always - ports: - - containerPort: 2020 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - startupProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:2020"] - failureThreshold: 30 - periodSeconds: 1 - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:2020"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:2020"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/device:latest + imagePullPolicy: Always + ports: + - containerPort: 2020 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + startupProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:2020"] + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:2020"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:2020"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -70,11 +70,11 @@ spec: selector: app: deviceservice ports: - - name: grpc - protocol: TCP - port: 2020 - targetPort: 2020 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 2020 + targetPort: 2020 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/dltservice.yaml b/manifests/dltservice.yaml index 5e8f745f78b14ea4f8881b8992ef788ce89fdcc2..f905749b444e821fce36d88fa24259d4ce03237b 100644 --- a/manifests/dltservice.yaml +++ b/manifests/dltservice.yaml @@ -27,57 +27,57 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: connector - image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest - imagePullPolicy: Always - ports: - - containerPort: 8080 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - ## for debug purposes - #- name: DLT_GATEWAY_HOST - # value: "mock-blockchain.tfs-bchain.svc.cluster.local" - #- name: DLT_GATEWAY_PORT - # value: "50051" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:8080"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:8080"] - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi - - name: gateway - image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest - imagePullPolicy: Always - ports: - - containerPort: 50051 - #readinessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - #livenessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - resources: - requests: - cpu: 200m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: connector + image: labs.etsi.org:5050/tfs/controller/dlt-connector:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + ## for debug purposes + #- name: DLT_GATEWAY_HOST + # 
value: "mock-blockchain.tfs-bchain.svc.cluster.local" + #- name: DLT_GATEWAY_PORT + # value: "50051" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:8080"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:8080"] + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi + - name: gateway + image: labs.etsi.org:5050/tfs/controller/dlt-gateway:latest + imagePullPolicy: Always + ports: + - containerPort: 50051 + #readinessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + #livenessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -90,11 +90,11 @@ spec: selector: app: dltservice ports: - - name: grpc - protocol: TCP - port: 8080 - targetPort: 8080 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 8080 + targetPort: 8080 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/e2e_orchestratorservice.yaml b/manifests/e2e_orchestratorservice.yaml index 899e17fff32e02473d7249eda937282d394efa4e..90d37771171d1f062a17d071bebe1fd1fee859ad 100644 --- a/manifests/e2e_orchestratorservice.yaml +++ b/manifests/e2e_orchestratorservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/e2e_orchestrator:latest - imagePullPolicy: Always - ports: - - containerPort: 10050 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10050"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10050"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/e2e_orchestrator:latest + imagePullPolicy: Always + ports: + - containerPort: 10050 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10050"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10050"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,12 +61,12 @@ spec: selector: app: e2e-orchestratorservice ports: - - name: grpc - port: 10050 - targetPort: 10050 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10050 + targetPort: 10050 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -80,12 +80,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/forecasterservice.yaml b/manifests/forecasterservice.yaml index 55d4add88f6fc507e9a4271cb40b20c4742c5bc7..d1136f61789cc33069aa0f3c40e578755ec70137 100644 --- a/manifests/forecasterservice.yaml +++ b/manifests/forecasterservice.yaml @@ -28,35 +28,35 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/forecaster:latest - imagePullPolicy: 
Always - ports: - - containerPort: 10040 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: FORECAST_TO_HISTORY_RATIO - value: "10" - startupProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10040"] - failureThreshold: 30 - periodSeconds: 1 - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10040"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10040"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/forecaster:latest + imagePullPolicy: Always + ports: + - containerPort: 10040 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: FORECAST_TO_HISTORY_RATIO + value: "10" + startupProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10040"] + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10040"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10040"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -69,14 +69,14 @@ spec: selector: app: forecasterservice ports: - - name: grpc - protocol: TCP - port: 10040 - targetPort: 10040 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10040 + targetPort: 10040 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -90,12 +90,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index a235834025724cb11e6b26364f25ae9feb28fe8c..ad9be3a3ec507a57b3cbcf196fd04a73a9fb34ac 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -27,30 +27,30 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/interdomain:latest - imagePullPolicy: Always - ports: - - containerPort: 10010 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: TOPOLOGY_ABSTRACTOR - value: "DISABLE" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10010"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10010"] - resources: - requests: - cpu: 250m - memory: 64Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/interdomain:latest + imagePullPolicy: Always + ports: + - containerPort: 10010 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: TOPOLOGY_ABSTRACTOR + value: "DISABLE" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10010"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10010"] + resources: + requests: + cpu: 250m + memory: 64Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -63,11 +63,11 @@ spec: selector: app: interdomainservice ports: - - name: grpc - protocol: TCP - port: 10010 - targetPort: 10010 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10010 + targetPort: 10010 + - name: 
metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/l3_attackmitigatorservice.yaml b/manifests/l3_attackmitigatorservice.yaml index ee97d2c92abb5abcad80f8ddf04800ef13144522..973b805bd7fb8c33a6b9e307afa87d8cb21ad7ec 100644 --- a/manifests/l3_attackmitigatorservice.yaml +++ b/manifests/l3_attackmitigatorservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/l3_attackmitigator:latest - imagePullPolicy: Always - ports: - - containerPort: 10002 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10002"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10002"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/l3_attackmitigator:latest + imagePullPolicy: Always + ports: + - containerPort: 10002 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10002"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10002"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,13 +61,13 @@ spec: selector: app: l3-attackmitigatorservice ports: - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 - - name: grpc - port: 10002 - targetPort: 10002 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 + - name: grpc + port: 10002 + targetPort: 10002 --- apiVersion: autoscaling/v2 @@ -82,12 +82,12 @@ spec: minReplicas: 1 maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 behavior: scaleDown: stabilizationWindowSeconds: 120 @@ -100,9 +100,9 @@ metadata: labels: app: l3-attackmitigatorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
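The recurring comment above stresses that the Prometheus operator only scrapes a ServiceMonitor whose labels match its release selector. Below is a minimal sketch of the l3-attackmitigator ServiceMonitor with that label enabled, assuming the release really is called `prom` (the value hinted at in the commented-out line) and using a hypothetical resource name, since `metadata.name` sits outside this hunk:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: l3-attackmitigatorservice-metric   # hypothetical; the real name is not shown in this hunk
  labels:
    app: l3-attackmitigatorservice
    release: prom                           # must match the Prometheus release selector
spec:
  selector:
    matchLabels:
      app: l3-attackmitigatorservice
  endpoints:
    - port: metrics      # named port in the target Service
      scheme: http
      path: /metrics
      interval: 5s
  namespaceSelector:
    any: false
    matchNames:
      - tfs
```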
spec: selector: @@ -112,11 +112,11 @@ spec: app: l3-attackmitigatorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running diff --git a/manifests/l3_centralizedattackdetectorservice.yaml b/manifests/l3_centralizedattackdetectorservice.yaml index 8a3be69b672200120afb4bca3892dd0c08ec2d65..98c5f9b41519f13d784a7cab7027a23d9d082714 100644 --- a/manifests/l3_centralizedattackdetectorservice.yaml +++ b/manifests/l3_centralizedattackdetectorservice.yaml @@ -27,36 +27,36 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/l3_centralizedattackdetector:latest - imagePullPolicy: Always - ports: - - containerPort: 10001 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - - name: BATCH_SIZE - value: "256" - - name: CAD_CLASSIFICATION_THRESHOLD - value: "0.5" - - name: MONITORED_KPIS_TIME_INTERVAL_AGG - value: "60" - - name: TEST_ML_MODEL - value: "0" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10001"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10001"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/l3_centralizedattackdetector:latest + imagePullPolicy: Always + ports: + - containerPort: 10001 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: BATCH_SIZE + value: "256" + - name: CAD_CLASSIFICATION_THRESHOLD + value: "0.5" + - name: MONITORED_KPIS_TIME_INTERVAL_AGG + value: "60" + - name: TEST_ML_MODEL + value: "0" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10001"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10001"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -69,13 +69,13 @@ spec: selector: app: l3-centralizedattackdetectorservice ports: - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 - - name: grpc - port: 10001 - targetPort: 10001 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 + - name: grpc + port: 10001 + targetPort: 10001 --- apiVersion: autoscaling/v2 @@ -90,12 +90,12 @@ spec: minReplicas: 1 maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 behavior: scaleDown: stabilizationWindowSeconds: 120 @@ -107,9 +107,9 @@ metadata: labels: app: l3-centralizedattackdetectorservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
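Several HPAs in these manifests keep a `#behavior:` stub commented out, while the two L3 security services enable it with a 120-second scale-down window. Purely as an illustration of what uncommenting that stub would mean (the names below mirror the dbscanservingservice manifest, but this is a sketch, not something the patch enables):

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: dbscanservingservice-hpa      # hypothetical name for the sketch
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: dbscanservingservice
  minReplicas: 1
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 30  # wait 30s of sustained low load before scaling in
```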
spec: selector: @@ -119,11 +119,11 @@ spec: app: l3-centralizedattackdetectorservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running diff --git a/manifests/l3_distributedattackdetectorservice.yaml b/manifests/l3_distributedattackdetectorservice.yaml index b363c1d5c5f82083d525959d6aedf337554f604a..bf72b5cd0752b2219ef3b5c2abad0b9330d43853 100644 --- a/manifests/l3_distributedattackdetectorservice.yaml +++ b/manifests/l3_distributedattackdetectorservice.yaml @@ -27,27 +27,27 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/l3_distributedattackdetector:latest - imagePullPolicy: Always - ports: - - containerPort: 10000 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10000"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10000"] - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/l3_distributedattackdetector:latest + imagePullPolicy: Always + ports: + - containerPort: 10000 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10000"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10000"] + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -58,6 +58,6 @@ spec: selector: app: l3-distributedattackdetectorservice ports: - - name: grpc - port: 10000 - targetPort: 10000 + - name: grpc + port: 10000 + targetPort: 10000 diff --git a/manifests/load_generatorservice.yaml b/manifests/load_generatorservice.yaml index 7cc6f19122573a612ddca774c3a785bff93f8b38..bda284ebda122780762c72b53052de28b2b25385 100644 --- a/manifests/load_generatorservice.yaml +++ b/manifests/load_generatorservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/load_generator:latest - imagePullPolicy: Always - ports: - - containerPort: 50052 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50052"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50052"] - resources: - requests: - cpu: 256m - memory: 64Mi - limits: - cpu: 512m - memory: 128Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/load_generator:latest + imagePullPolicy: Always + ports: + - containerPort: 50052 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50052"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50052"] + resources: + requests: + cpu: 256m + memory: 64Mi + limits: + cpu: 512m + memory: 128Mi --- apiVersion: v1 kind: Service @@ -62,11 +62,11 @@ spec: selector: app: load-generatorservice ports: - - name: grpc - protocol: TCP - port: 50052 - targetPort: 50052 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - 
name: grpc + protocol: TCP + port: 50052 + targetPort: 50052 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/mock_blockchain.yaml b/manifests/mock_blockchain.yaml index 1093610f8a0cf4d735e16e99df56cb78fd6481fc..806432be630fa2502e1c121a9c61f16e0fcd1b0c 100644 --- a/manifests/mock_blockchain.yaml +++ b/manifests/mock_blockchain.yaml @@ -27,27 +27,27 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/mock_blockchain:latest - imagePullPolicy: Always - ports: - - containerPort: 50051 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50051"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:50051"] - resources: - requests: - cpu: 100m - memory: 512Mi - limits: - cpu: 700m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/mock_blockchain:latest + imagePullPolicy: Always + ports: + - containerPort: 50051 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50051"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:50051"] + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 700m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -58,7 +58,7 @@ spec: selector: app: mock-blockchain ports: - - name: grpc - protocol: TCP - port: 50051 - targetPort: 50051 + - name: grpc + protocol: TCP + port: 50051 + targetPort: 50051 diff --git a/manifests/monitoringservice.yaml b/manifests/monitoringservice.yaml index 1a20593959a98b247272000f6de864362ed8cf21..3a4d43cd9d17d645da4d70a22ec4dde12a8d7bc2 100644 --- a/manifests/monitoringservice.yaml +++ b/manifests/monitoringservice.yaml @@ -28,31 +28,31 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/monitoring:latest - imagePullPolicy: Always - ports: - - containerPort: 7070 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - envFrom: - - secretRef: - name: qdb-data - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:7070"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:7070"] - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/monitoring:latest + imagePullPolicy: Always + ports: + - containerPort: 7070 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + envFrom: + - secretRef: + name: qdb-data + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:7070"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:7070"] + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -65,11 +65,11 @@ spec: selector: app: monitoringservice ports: - - name: grpc - protocol: TCP - port: 7070 - targetPort: 7070 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 7070 + targetPort: 7070 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index f5477aeb40582717aa6477ef0baad0de4f51b297..0a3bd1ea69918429de73d58812248269ccf70a56 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -28,29 +28,29 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server 
- image: labs.etsi.org:5050/tfs/controller/nbi:latest - imagePullPolicy: Always - ports: - - containerPort: 8080 - - containerPort: 9090 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:9090"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:9090"] - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/nbi:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + - containerPort: 9090 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:9090"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:9090"] + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service @@ -63,15 +63,15 @@ spec: selector: app: nbiservice ports: - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - - name: grpc - protocol: TCP - port: 9090 - targetPort: 9090 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + - name: grpc + protocol: TCP + port: 9090 + targetPort: 9090 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index e8e8a80e4d4325a28202070a463cda959e3ed634..91440fb7a2bf417d7a020b38070570f2ed3da8df 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -20,40 +20,40 @@ metadata: nginx.ingress.kubernetes.io/rewrite-target: /$2 spec: rules: - - http: - paths: - - path: /webui(/|$)(.*) - pathType: Prefix - backend: - service: - name: webuiservice - port: - number: 8004 - - path: /grafana(/|$)(.*) - pathType: Prefix - backend: - service: - name: webuiservice - port: - number: 3000 - - path: /()(restconf/.*) - pathType: Prefix - backend: - service: - name: nbiservice - port: - number: 8080 - - path: /()(debug-api/.*) - pathType: Prefix - backend: - service: - name: nbiservice - port: - number: 8080 - - path: /()(bmw/.*) - pathType: Prefix - backend: - service: - name: nbiservice - port: - number: 8080 + - http: + paths: + - path: /webui(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 8004 + - path: /grafana(/|$)(.*) + pathType: Prefix + backend: + service: + name: webuiservice + port: + number: 3000 + - path: /()(restconf/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /()(debug-api/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 + - path: /()(bmw/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 diff --git a/manifests/opticalattackdetectorservice.yaml b/manifests/opticalattackdetectorservice.yaml index 197c23dd237e5f6271fbab7e47613892c6f58f83..11fd62b61f77a30c6916fbf189758947beb96286 100644 --- a/manifests/opticalattackdetectorservice.yaml +++ b/manifests/opticalattackdetectorservice.yaml @@ -27,33 +27,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/opticalattackdetector:latest - imagePullPolicy: Always - ports: - - containerPort: 10006 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - 
key: REDIS_PASSWORD - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10006"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10006"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/opticalattackdetector:latest + imagePullPolicy: Always + ports: + - containerPort: 10006 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10006"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10006"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -66,12 +66,12 @@ spec: selector: app: opticalattackdetectorservice ports: - - name: grpc - port: 10006 - targetPort: 10006 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10006 + targetPort: 10006 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -85,12 +85,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/opticalattackmanagerservice.yaml b/manifests/opticalattackmanagerservice.yaml index f9838bcbb1c0d86e5d3a22d8ed982b533f984eb3..4e01d76ac838bc3b0353f95de0c493d8004e5a9a 100644 --- a/manifests/opticalattackmanagerservice.yaml +++ b/manifests/opticalattackmanagerservice.yaml @@ -28,33 +28,33 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/opticalattackmanager:latest - imagePullPolicy: Always - ports: - - containerPort: 10005 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - - name: MONITORING_INTERVAL - value: "30" - - name: OPTICALATTACKMANAGERSERVICE_LOOP_MIN_WORKERS - value: "2" # remember to align this with the resource limits - - name: OPTICALATTACKMANAGERSERVICE_LOOP_MAX_WORKERS - value: "10" # remember to align this with the resource limits - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - key: REDIS_PASSWORD - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 10000m - memory: 10240Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/opticalattackmanager:latest + imagePullPolicy: Always + ports: + - containerPort: 10005 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: MONITORING_INTERVAL + value: "30" + - name: OPTICALATTACKMANAGERSERVICE_LOOP_MIN_WORKERS + value: "2" # remember to align this with the resource limits + - name: OPTICALATTACKMANAGERSERVICE_LOOP_MAX_WORKERS + value: "10" # remember to align this with the resource limits + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 10000m + memory: 10240Mi --- apiVersion: v1 kind: Service @@ -67,9 +67,9 @@ spec: selector: app: opticalattackmanagerservice ports: - - name: grpc - port: 10005 - targetPort: 10005 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10005 + targetPort: 10005 + - name: metrics + 
port: 9192 + targetPort: 9192 diff --git a/manifests/opticalattackmitigatorservice.yaml b/manifests/opticalattackmitigatorservice.yaml index 4d148b347157bd310ec6c921670f0434315e6e27..255e0fd86a5c948d44868e7b96e6c65825556910 100644 --- a/manifests/opticalattackmitigatorservice.yaml +++ b/manifests/opticalattackmitigatorservice.yaml @@ -27,28 +27,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: labs.etsi.org:5050/tfs/controller/opticalattackmitigator:latest - imagePullPolicy: Always - ports: - - containerPort: 10007 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10007"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10007"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: labs.etsi.org:5050/tfs/controller/opticalattackmitigator:latest + imagePullPolicy: Always + ports: + - containerPort: 10007 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10007"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10007"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -61,12 +61,12 @@ spec: selector: app: opticalattackmitigatorservice ports: - - name: grpc - port: 10007 - targetPort: 10007 - - name: metrics - port: 9192 - targetPort: 9192 + - name: grpc + port: 10007 + targetPort: 10007 + - name: metrics + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -80,12 +80,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/opticalcontrollerservice.yaml b/manifests/opticalcontrollerservice.yaml index 4b677ee4f7287b0790aaa0b19f034db03978fac0..f2351720c008d9ecaf53779253ba8a368ffb8bf7 100644 --- a/manifests/opticalcontrollerservice.yaml +++ b/manifests/opticalcontrollerservice.yaml @@ -28,28 +28,28 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: server - image: localhost:32000/tfs/opticalcontroller:dev - imagePullPolicy: Never - ports: - - containerPort: 10060 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "INFO" - #readinessProbe: - # exec: - # command: ["/bin/grpc_health_probe", "-addr=:10060"] - #livenessProbe: - # exec: - # command: ["/bin/grpc_health_probe", "-addr=:10060"] - resources: - requests: - cpu: 500m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: server + image: localhost:32000/tfs/opticalcontroller:dev + imagePullPolicy: Never + ports: + - containerPort: 10060 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + #readinessProbe: + # exec: + # command: ["/bin/grpc_health_probe", "-addr=:10060"] + #livenessProbe: + # exec: + # command: ["/bin/grpc_health_probe", "-addr=:10060"] + resources: + requests: + cpu: 500m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -62,11 +62,11 @@ spec: selector: app: opticalcontrollerservice ports: - - name: grpc - protocol: TCP - port: 10060 - targetPort: 10060 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - 
name: grpc + protocol: TCP + port: 10060 + targetPort: 10060 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 4beadea8fc6e4bc01f64769c9366c91985ef17c3..f017e694085da963944194197c91f5b4e9bcf70c 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -28,54 +28,54 @@ spec: spec: terminationGracePeriodSeconds: 5 containers: - - name: frontend - image: labs.etsi.org:5050/tfs/controller/pathcomp-frontend:latest - imagePullPolicy: Always - ports: - - containerPort: 10020 - - containerPort: 9192 - env: - - name: LOG_LEVEL - value: "DEBUG" - - name: ENABLE_FORECASTER - value: "YES" - readinessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10020"] - livenessProbe: - exec: - command: ["/bin/grpc_health_probe", "-addr=:10020"] - resources: - requests: - cpu: 250m - memory: 128Mi - limits: - cpu: 1000m - memory: 1024Mi - - name: backend - image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest - imagePullPolicy: Always - ports: - - containerPort: 8081 - #readinessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - #livenessProbe: - # httpGet: - # path: /health - # port: 8081 - # initialDelaySeconds: 5 - # timeoutSeconds: 5 - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 1000m - memory: 1024Mi + - name: frontend + image: labs.etsi.org:5050/tfs/controller/pathcomp-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 10020 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: ENABLE_FORECASTER + value: "NO" + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10020"] + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=:10020"] + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi + - name: backend + image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 8081 + #readinessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + #livenessProbe: + # httpGet: + # path: /health + # port: 8081 + # initialDelaySeconds: 5 + # timeoutSeconds: 5 + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 1000m + memory: 1024Mi --- apiVersion: v1 kind: Service @@ -88,18 +88,18 @@ spec: selector: app: pathcompservice ports: - - name: grpc - protocol: TCP - port: 10020 - targetPort: 10020 - - name: http - protocol: TCP - port: 8081 - targetPort: 8081 - - name: metrics - protocol: TCP - port: 9192 - targetPort: 9192 + - name: grpc + protocol: TCP + port: 10020 + targetPort: 10020 + - name: http + protocol: TCP + port: 8081 + targetPort: 8081 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler @@ -113,12 +113,12 @@ spec: minReplicas: 1 maxReplicas: 20 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 #behavior: # scaleDown: # stabilizationWindowSeconds: 30 diff --git a/manifests/policyservice.yaml b/manifests/policyservice.yaml index b34331724665e4788d2de327bb5160eb370fed96..b93eeda03bb32159a587eabdf776d21e4786a10a 100644 --- a/manifests/policyservice.yaml +++ b/manifests/policyservice.yaml @@ -121,9 +121,9 @@ spec: minReplicas: 1 
maxReplicas: 10 metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 80 diff --git a/manifests/prometheus.yaml b/manifests/prometheus.yaml index 43a766b6e2ae67e36d161c3063a8a2d3ccc875d3..ec1ffb0c509437a715c508aa378a2a01f1c2252a 100644 --- a/manifests/prometheus.yaml +++ b/manifests/prometheus.yaml @@ -60,18 +60,18 @@ spec: spec: #serviceAccountName: prometheus containers: - - name: prometheus - image: prom/prometheus:v2.28.1 - ports: - - containerPort: 9090 - volumeMounts: - - name: prometheus-config-volume - mountPath: /etc/prometheus/prometheus.yml - subPath: prometheus.yml + - name: prometheus + image: prom/prometheus:v2.28.1 + ports: + - containerPort: 9090 + volumeMounts: + - name: prometheus-config-volume + mountPath: /etc/prometheus/prometheus.yml + subPath: prometheus.yml volumes: - - name: prometheus-config-volume - configMap: - name: prometheus-config + - name: prometheus-config-volume + configMap: + name: prometheus-config restartPolicy: Always --- apiVersion: v1 @@ -85,8 +85,9 @@ spec: selector: app: prometheus ports: - - name: http - protocol: TCP - port: 9090 - targetPort: 9090 + - name: http + protocol: TCP + port: 9090 + targetPort: 9090 --- + diff --git a/manifests/servicemonitors.yaml b/manifests/servicemonitors.yaml index 1f7f3322d77e3e06a0fd902baad4481e77e143ac..ccfe774b3a4b860c5e7f393ee9e3d8ca4eb097ae 100644 --- a/manifests/servicemonitors.yaml +++ b/manifests/servicemonitors.yaml @@ -20,9 +20,9 @@ metadata: labels: app: contextservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -32,14 +32,14 @@ spec: app: contextservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -49,9 +49,9 @@ metadata: labels: app: deviceservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
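The Prometheus Deployment above mounts `/etc/prometheus/prometheus.yml` from a ConfigMap named `prometheus-config` via `subPath`. The ConfigMap body is not part of this diff, so the following only sketches the shape such a ConfigMap could take; the scrape job and target are assumptions for illustration, not the project's actual configuration:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
data:
  prometheus.yml: |
    global:
      scrape_interval: 5s
    scrape_configs:
      - job_name: tfs-metrics          # hypothetical job scraping the :9192 metrics ports
        static_configs:
          - targets: ['contextservice.tfs.svc.cluster.local:9192']
```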
spec: selector: @@ -61,14 +61,14 @@ spec: app: deviceservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -78,9 +78,9 @@ metadata: labels: app: serviceservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -90,14 +90,14 @@ spec: app: serviceservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -107,9 +107,9 @@ metadata: labels: app: sliceservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -119,14 +119,14 @@ spec: app: sliceservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -136,9 +136,9 @@ metadata: labels: app: pathcompservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -148,14 +148,14 @@ spec: app: pathcompservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -165,9 +165,9 @@ metadata: labels: app: monitoringservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -177,14 +177,14 @@ spec: app: monitoringservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -194,9 +194,9 @@ metadata: labels: app: dltservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -206,14 +206,14 @@ spec: app: dltservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -223,9 +223,9 @@ metadata: labels: app: interdomainservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
spec: selector: @@ -235,14 +235,14 @@ spec: app: interdomainservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -252,9 +252,9 @@ metadata: labels: app: policyservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -264,14 +264,14 @@ spec: app: policyservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /q/metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /q/metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -281,9 +281,9 @@ metadata: labels: app: ztpservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) spec: selector: @@ -293,14 +293,14 @@ spec: app: ztpservice # same as above #release: prometheus # same as above endpoints: - - port: metrics # named port in target app - scheme: http - path: /q/metrics # path to scrape - interval: 5s # scrape interval + - port: metrics # named port in target app + scheme: http + path: /q/metrics # path to scrape + interval: 5s # scrape interval namespaceSelector: any: false matchNames: - - tfs # namespace where the app is running + - tfs # namespace where the app is running --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -310,9 +310,9 @@ metadata: labels: app: nbiservice #release: prometheus - #release: prom # name of the release - # ( VERY IMPORTANT: You need to know the correct release name by viewing - # the servicemonitor of Prometheus itself: Without the correct name, + #release: prom # name of the release + # ( VERY IMPORTANT: You need to know the correct release name by viewing + # the servicemonitor of Prometheus itself: Without the correct name, # Prometheus cannot identify the metrics of the Flask app as the target.) 
 spec:
   selector:
@@ -322,14 +322,14 @@ spec:
       app: nbiservice # same as above
       #release: prometheus # same as above
   endpoints:
-  - port: metrics # named port in target app
-    scheme: http
-    path: /metrics # path to scrape
-    interval: 5s # scrape interval
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
   namespaceSelector:
     any: false
     matchNames:
-    - tfs # namespace where the app is running
+      - tfs # namespace where the app is running
 ---
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
@@ -339,9 +339,9 @@ metadata:
   labels:
     app: load-generatorservice
     #release: prometheus
-  #release: prom # name of the release
-  # ( VERY IMPORTANT: You need to know the correct release name by viewing
-  # the servicemonitor of Prometheus itself: Without the correct name,
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    # the servicemonitor of Prometheus itself: Without the correct name,
     # Prometheus cannot identify the metrics of the Flask app as the target.)
 spec:
   selector:
@@ -351,14 +351,14 @@ spec:
       app: load-generatorservice # same as above
       #release: prometheus # same as above
   endpoints:
-  - port: metrics # named port in target app
-    scheme: http
-    path: /metrics # path to scrape
-    interval: 5s # scrape interval
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
   namespaceSelector:
     any: false
     matchNames:
-    - tfs # namespace where the app is running
+      - tfs # namespace where the app is running
 ---
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
@@ -368,9 +368,9 @@ metadata:
   labels:
     app: dbscanservingservice
     #release: prometheus
-  #release: prom # name of the release
-  # ( VERY IMPORTANT: You need to know the correct release name by viewing
-  # the servicemonitor of Prometheus itself: Without the correct name,
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    # the servicemonitor of Prometheus itself: Without the correct name,
     # Prometheus cannot identify the metrics of the Flask app as the target.)
 spec:
   selector:
@@ -380,14 +380,14 @@ spec:
       app: dbscanservingservice # same as above
       #release: prometheus # same as above
   endpoints:
-  - port: metrics # named port in target app
-    scheme: http
-    path: /metrics # path to scrape
-    interval: 5s # scrape interval
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
   namespaceSelector:
     any: false
     matchNames:
-    - tfs # namespace where the app is running
+      - tfs # namespace where the app is running
 ---
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
@@ -397,9 +397,9 @@ metadata:
   labels:
     app: opticalattackmitigatorservice
     #release: prometheus
-  #release: prom # name of the release
-  # ( VERY IMPORTANT: You need to know the correct release name by viewing
-  # the servicemonitor of Prometheus itself: Without the correct name,
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    # the servicemonitor of Prometheus itself: Without the correct name,
     # Prometheus cannot identify the metrics of the Flask app as the target.)
 spec:
   selector:
@@ -409,14 +409,14 @@ spec:
       app: opticalattackmitigatorservice # same as above
       #release: prometheus # same as above
   endpoints:
-  - port: metrics # named port in target app
-    scheme: http
-    path: /metrics # path to scrape
-    interval: 5s # scrape interval
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
   namespaceSelector:
     any: false
     matchNames:
-    - tfs # namespace where the app is running
+      - tfs # namespace where the app is running
 ---
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
@@ -426,9 +426,9 @@ metadata:
   labels:
     app: opticalattackdetectorservice
     #release: prometheus
-  #release: prom # name of the release
-  # ( VERY IMPORTANT: You need to know the correct release name by viewing
-  # the servicemonitor of Prometheus itself: Without the correct name,
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    # the servicemonitor of Prometheus itself: Without the correct name,
     # Prometheus cannot identify the metrics of the Flask app as the target.)
 spec:
   selector:
@@ -438,14 +438,14 @@ spec:
       app: opticalattackdetectorservice # same as above
       #release: prometheus # same as above
   endpoints:
-  - port: metrics # named port in target app
-    scheme: http
-    path: /metrics # path to scrape
-    interval: 5s # scrape interval
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
   namespaceSelector:
     any: false
     matchNames:
-    - tfs # namespace where the app is running
+      - tfs # namespace where the app is running
 ---
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
@@ -455,9 +455,9 @@ metadata:
   labels:
     app: opticalattackmanagerservice
     #release: prometheus
-  #release: prom # name of the release
-  # ( VERY IMPORTANT: You need to know the correct release name by viewing
-  # the servicemonitor of Prometheus itself: Without the correct name,
+    #release: prom # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing
+    # the servicemonitor of Prometheus itself: Without the correct name,
     # Prometheus cannot identify the metrics of the Flask app as the target.)
 spec:
   selector:
@@ -467,11 +467,11 @@ spec:
       app: opticalattackmanagerservice # same as above
       #release: prometheus # same as above
   endpoints:
-  - port: metrics # named port in target app
-    scheme: http
-    path: /metrics # path to scrape
-    interval: 5s # scrape interval
+    - port: metrics # named port in target app
+      scheme: http
+      path: /metrics # path to scrape
+      interval: 5s # scrape interval
   namespaceSelector:
     any: false
     matchNames:
-    - tfs # namespace where the app is running
+      - tfs # namespace where the app is running
diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml
index 3865fd6c0bee8d5222d57c50df58435a5669b9e1..2fb7ebb8756a48b0a909734b7b9a90a44d79b9ea 100644
--- a/manifests/serviceservice.yaml
+++ b/manifests/serviceservice.yaml
@@ -28,28 +28,28 @@ spec:
     spec:
       terminationGracePeriodSeconds: 5
       containers:
-      - name: server
-        image: labs.etsi.org:5050/tfs/controller/service:latest
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 3030
-        - containerPort: 9192
-        env:
-        - name: LOG_LEVEL
-          value: "DEBUG"
-        readinessProbe:
-          exec:
-            command: ["/bin/grpc_health_probe", "-addr=:3030"]
-        livenessProbe:
-          exec:
-            command: ["/bin/grpc_health_probe", "-addr=:3030"]
-        resources:
-          requests:
-            cpu: 250m
-            memory: 128Mi
-          limits:
-            cpu: 1000m
-            memory: 1024Mi
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/service:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 3030
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:3030"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:3030"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -62,14 +62,14 @@ spec:
   selector:
     app: serviceservice
   ports:
-  - name: grpc
-    protocol: TCP
-    port: 3030
-    targetPort: 3030
-  - name: metrics
-    protocol: TCP
-    port: 9192
-    targetPort: 9192
+    - name: grpc
+      protocol: TCP
+      port: 3030
+      targetPort: 3030
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
 ---
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
@@ -83,12 +83,12 @@ spec:
   minReplicas: 1
   maxReplicas: 20
   metrics:
-  - type: Resource
-    resource:
-      name: cpu
-      target:
-        type: Utilization
-        averageUtilization: 80
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
   #behavior:
   #  scaleDown:
   #    stabilizationWindowSeconds: 30
diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml
index 61f5b1d21e0e6305449020fccb139a0dfe21b046..0daa8e70f4965274be563a8e5f703855281fe698 100644
--- a/manifests/sliceservice.yaml
+++ b/manifests/sliceservice.yaml
@@ -28,33 +28,33 @@ spec:
     spec:
       terminationGracePeriodSeconds: 5
       containers:
-      - name: server
-        image: labs.etsi.org:5050/tfs/controller/slice:latest
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 4040
-        - containerPort: 9192
-        env:
-        - name: LOG_LEVEL
-          value: "DEBUG"
-        - name: SLICE_GROUPING
-          value: "DISABLE"
-        envFrom:
-        - secretRef:
-            name: qdb-data
-        readinessProbe:
-          exec:
-            command: ["/bin/grpc_health_probe", "-addr=:4040"]
-        livenessProbe:
-          exec:
-            command: ["/bin/grpc_health_probe", "-addr=:4040"]
-        resources:
-          requests:
-            cpu: 250m
-            memory: 128Mi
-          limits:
-            cpu: 1000m
-            memory: 1024Mi
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/slice:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 4040
+            - containerPort: 9192
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+            - name: SLICE_GROUPING
+              value: "DISABLE"
+          envFrom:
+            - secretRef:
+                name: qdb-data
+          readinessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:4040"]
+          livenessProbe:
+            exec:
+              command: ["/bin/grpc_health_probe", "-addr=:4040"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -67,14 +67,14 @@ spec:
   selector:
     app: sliceservice
   ports:
-  - name: grpc
-    protocol: TCP
-    port: 4040
-    targetPort: 4040
-  - name: metrics
-    protocol: TCP
-    port: 9192
-    targetPort: 9192
+    - name: grpc
+      protocol: TCP
+      port: 4040
+      targetPort: 4040
+    - name: metrics
+      protocol: TCP
+      port: 9192
+      targetPort: 9192
 ---
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
@@ -88,12 +88,12 @@ spec:
   minReplicas: 1
   maxReplicas: 20
   metrics:
-  - type: Resource
-    resource:
-      name: cpu
-      target:
-        type: Utilization
-        averageUtilization: 80
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80
   #behavior:
   #  scaleDown:
   #    stabilizationWindowSeconds: 30
diff --git a/manifests/teservice.yaml b/manifests/teservice.yaml
index 15f1619df08069f00db883f0b918c17837c707d1..ec8f2e3d6ad360eab1a1869e9564fd6c03561e06 100644
--- a/manifests/teservice.yaml
+++ b/manifests/teservice.yaml
@@ -30,37 +30,37 @@ spec:
       terminationGracePeriodSeconds: 5
       shareProcessNamespace: true
       containers:
-      - name: server
-        image: labs.etsi.org:5050/tfs/controller/te:latest
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 10030
-        env:
-        - name: ERLANG_LOGGER_LEVEL
-          value: "debug"
-        - name: ERLANG_COOKIE
-          value: "tfte-unsafe-cookie"
-        - name: ERLANG_NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: ERLANG_NODE_IP
-          valueFrom:
-            fieldRef:
-              fieldPath: status.podIP
-        readinessProbe:
-          exec:
-            command: ["/tfte/bin/tfte", "status"]
-        livenessProbe:
-          exec:
-            command: ["/tfte/bin/tfte", "status"]
-        resources:
-          requests:
-            cpu: 250m
-            memory: 512Mi
-          limits:
-            cpu: 700m
-            memory: 1024Mi
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/te:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 10030
+          env:
+            - name: ERLANG_LOGGER_LEVEL
+              value: "debug"
+            - name: ERLANG_COOKIE
+              value: "tfte-unsafe-cookie"
+            - name: ERLANG_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: ERLANG_NODE_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+          readinessProbe:
+            exec:
+              command: ["/tfte/bin/tfte", "status"]
+          livenessProbe:
+            exec:
+              command: ["/tfte/bin/tfte", "status"]
+          resources:
+            requests:
+              cpu: 250m
+              memory: 512Mi
+            limits:
+              cpu: 700m
+              memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -71,11 +71,11 @@ spec:
   selector:
     app: teservice
   ports:
-  - name: grpc
-    protocol: TCP
-    port: 10030
-    targetPort: 10030
-  - name: pcep
-    protocol: TCP
-    port: 4189
-    targetPort: 4189
+    - name: grpc
+      protocol: TCP
+      port: 10030
+      targetPort: 10030
+    - name: pcep
+      protocol: TCP
+      port: 4189
+      targetPort: 4189
diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml
index 43caa9f04b56d6477d82c5a5bb22cb292eec8a90..bb2573c454a7bf59364a7526a1b5193111daf892 100644
--- a/manifests/webuiservice.yaml
+++ b/manifests/webuiservice.yaml
@@ -32,73 +32,73 @@ spec:
         supplementalGroups:
           - 0
       containers:
-      - name: server
-        image: labs.etsi.org:5050/tfs/controller/webui:latest
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 8004
-        env:
-        - name: LOG_LEVEL
-          value: "INFO"
-        - name: WEBUISERVICE_SERVICE_BASEURL_HTTP
-          value: "/webui/"
-        readinessProbe:
-          httpGet:
-            path: /healthz/ready
-            port: 8004
-          initialDelaySeconds: 5
-          timeoutSeconds: 1
-        livenessProbe:
-          httpGet:
-            path: /healthz/live
-            port: 8004
-          initialDelaySeconds: 5
-          timeoutSeconds: 1
-        resources:
-          requests:
-            cpu: 50m
-            memory: 128Mi
-          limits:
-            cpu: 1000m
-            memory: 1024Mi
-      - name: grafana
-        image: grafana/grafana:8.5.22
-        imagePullPolicy: IfNotPresent
-        ports:
-        - containerPort: 3000
-          name: http-grafana
-          protocol: TCP
-        env:
-        - name: GF_SERVER_ROOT_URL
-          value: "http://0.0.0.0:3000/grafana/"
-        - name: GF_SERVER_SERVE_FROM_SUB_PATH
-          value: "true"
-        readinessProbe:
-          failureThreshold: 60
-          httpGet:
-            #path: /robots.txt
-            path: /login
-            port: 3000
-            scheme: HTTP
-          initialDelaySeconds: 1
-          periodSeconds: 1
-          successThreshold: 1
-          timeoutSeconds: 2
-        livenessProbe:
-          failureThreshold: 60
-          initialDelaySeconds: 1
-          periodSeconds: 1
-          successThreshold: 1
-          tcpSocket:
-            port: 3000
-          timeoutSeconds: 1
-        resources:
-          requests:
-            cpu: 250m
-            memory: 512Mi
-          limits:
-            cpu: 500m
-            memory: 1024Mi
+        - name: server
+          image: labs.etsi.org:5050/tfs/controller/webui:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8004
+          env:
+            - name: LOG_LEVEL
+              value: "INFO"
+            - name: WEBUISERVICE_SERVICE_BASEURL_HTTP
+              value: "/webui/"
+          readinessProbe:
+            httpGet:
+              path: /healthz/ready
+              port: 8004
+            initialDelaySeconds: 5
+            timeoutSeconds: 1
+          livenessProbe:
+            httpGet:
+              path: /healthz/live
+              port: 8004
+            initialDelaySeconds: 5
+            timeoutSeconds: 1
+          resources:
+            requests:
+              cpu: 50m
+              memory: 128Mi
+            limits:
+              cpu: 1000m
+              memory: 1024Mi
+        - name: grafana
+          image: grafana/grafana:8.5.22
+          imagePullPolicy: IfNotPresent
+          ports:
+            - containerPort: 3000
+              name: http-grafana
+              protocol: TCP
+          env:
+            - name: GF_SERVER_ROOT_URL
+              value: "http://0.0.0.0:3000/grafana/"
+            - name: GF_SERVER_SERVE_FROM_SUB_PATH
+              value: "true"
+          readinessProbe:
+            failureThreshold: 60
+            httpGet:
+              #path: /robots.txt
+              path: /login
+              port: 3000
+              scheme: HTTP
+            initialDelaySeconds: 1
+            periodSeconds: 1
+            successThreshold: 1
+            timeoutSeconds: 2
+          livenessProbe:
+            failureThreshold: 60
+            initialDelaySeconds: 1
+            periodSeconds: 1
+            successThreshold: 1
+            tcpSocket:
+              port: 3000
+            timeoutSeconds: 1
+          resources:
+            requests:
+              cpu: 250m
+              memory: 512Mi
+            limits:
+              cpu: 500m
+              memory: 1024Mi
 ---
 apiVersion: v1
 kind: Service
@@ -111,9 +111,9 @@ spec:
   selector:
     app: webuiservice
   ports:
-  - name: webui
-    port: 8004
-    targetPort: 8004
-  - name: grafana
-    port: 3000
-    targetPort: 3000
+    - name: webui
+      port: 8004
+      targetPort: 8004
+    - name: grafana
+      port: 3000
+      targetPort: 3000
diff --git a/manifests/ztpservice.yaml b/manifests/ztpservice.yaml
index e2be80cea7b1e779fdc9bb05110ddc0d6b144101..323d3c4bc902b99ddf2ee9dd9be1594fcbe986f5 100644
--- a/manifests/ztpservice.yaml
+++ b/manifests/ztpservice.yaml
@@ -117,9 +117,9 @@ spec:
   minReplicas: 1
   maxReplicas: 10
   metrics:
-  - type: Resource
-    resource:
-      name: cpu
-      target:
-        type: Utilization
-        averageUtilization: 80
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: 80