Compare revisions — tfs/controller

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (235)
Showing changes with 940 additions and 56 deletions
......@@ -147,6 +147,15 @@ export QDB_DROP_TABLES_IF_EXIST=${QDB_DROP_TABLES_IF_EXIST:-""}
export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
# ----- K8s Observability ------------------------------------------------------
# If not already set, set the external port on which the Prometheus Mgmt HTTP GUI interface will be exposed.
export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
# If not already set, set the external port on which the Grafana HTTP Dashboards will be exposed.
export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
......@@ -160,6 +169,9 @@ export QDB_REDEPLOY=${QDB_REDEPLOY:-""}
# Deploy QuestDB
./deploy/qdb.sh
# Expose Dashboard
./deploy/expose_dashboard.sh
# Deploy TeraFlowSDN
./deploy/tfs.sh
......
......@@ -167,6 +167,11 @@ function crdb_drop_database_single() {
}
function crdb_deploy_cluster() {
echo "CockroachDB Operator Namespace"
echo ">>> Create CockroachDB Operator Namespace (if missing)"
kubectl apply -f "${CRDB_MANIFESTS_PATH}/pre_operator.yaml"
echo
echo "Cockroach Operator CRDs"
echo ">>> Apply Cockroach Operator CRDs (if they are missing)"
cp "${CRDB_MANIFESTS_PATH}/crds.yaml" "${TMP_MANIFESTS_FOLDER}/crdb_crds.yaml"
......
#!/bin/bash
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Read deployment settings
########################################################################################################################
# If not already set, set the external port on which the Prometheus Mgmt HTTP GUI interface will be exposed.
export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
# If not already set, set the external port on which the Grafana HTTP Dashboards will be exposed.
export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
function expose_dashboard() {
    echo "Prometheus Port Mapping"
    echo ">>> Expose Prometheus HTTP Mgmt GUI port (9090->${PROM_EXT_PORT_HTTP})"
    PROM_PORT_HTTP=$(kubectl --namespace monitoring get service prometheus-k8s -o 'jsonpath={.spec.ports[?(@.name=="web")].port}')
    PATCH='{"data": {"'${PROM_EXT_PORT_HTTP}'": "monitoring/prometheus-k8s:'${PROM_PORT_HTTP}'"}}'
    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
    PORT_MAP='{"containerPort": '${PROM_EXT_PORT_HTTP}', "hostPort": '${PROM_EXT_PORT_HTTP}'}'
    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
    echo
    echo "Grafana Port Mapping"
    echo ">>> Expose Grafana HTTP Mgmt GUI port (3000->${GRAF_EXT_PORT_HTTP})"
    GRAF_PORT_HTTP=$(kubectl --namespace monitoring get service grafana -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
    PATCH='{"data": {"'${GRAF_EXT_PORT_HTTP}'": "monitoring/grafana:'${GRAF_PORT_HTTP}'"}}'
    kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
    PORT_MAP='{"containerPort": '${GRAF_EXT_PORT_HTTP}', "hostPort": '${GRAF_EXT_PORT_HTTP}'}'
    CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
    PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
    kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
    echo
}
expose_dashboard
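A quick way to sanity-check the new port mappings after running the script (a minimal sketch; it assumes a MicroK8s cluster with the ingress and observability addons enabled and the default external ports):

# Confirm the TCP ConfigMap now routes the external ports to the monitoring services
kubectl --namespace ingress get configmap nginx-ingress-tcp-microk8s-conf -o yaml
# Probe the exposed endpoints from the host
curl -sI "http://127.0.0.1:${PROM_EXT_PORT_HTTP:-9090}/-/ready"
curl -sI "http://127.0.0.1:${GRAF_EXT_PORT_HTTP:-3000}/login"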
......@@ -53,7 +53,7 @@ function nats_deploy_single() {
echo ">>> NATS is present; skipping step."
else
echo ">>> Deploy NATS"
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image.tag=2.9-alpine
helm3 install ${NATS_NAMESPACE} nats/nats --namespace ${NATS_NAMESPACE} --set nats.image=nats:2.9-alpine
echo ">>> Waiting NATS statefulset to be created..."
while ! kubectl get --namespace ${NATS_NAMESPACE} statefulset/${NATS_NAMESPACE} &> /dev/null; do
......
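Once the StatefulSet object exists, the readiness wait can also be expressed directly with kubectl instead of a polling loop (a hedged sketch; it assumes NATS_NAMESPACE is set as in the deployment settings):

# Block until all NATS pods report Ready, or give up after five minutes
kubectl --namespace "${NATS_NAMESPACE}" rollout status statefulset/"${NATS_NAMESPACE}" --timeout=300s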
......@@ -106,6 +106,15 @@ export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kp
export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
# ----- K8s Observability ------------------------------------------------------
# If not already set, set the external port on which the Prometheus Mgmt HTTP GUI interface will be exposed.
export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
# If not already set, set the external port on which the Grafana HTTP Dashboards will be exposed.
export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
......@@ -241,7 +250,8 @@ for COMPONENT in $TFS_COMPONENTS; do
echo " Adapting '$COMPONENT' manifest file..."
MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
# cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
cat ./manifests/"${COMPONENT}"service.yaml | linkerd inject - --proxy-cpu-request "10m" --proxy-cpu-limit "1" --proxy-memory-request "64Mi" --proxy-memory-limit "256Mi" > "$MANIFEST"
if [ "$COMPONENT" == "pathcomp" ]; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
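After ./deploy/tfs.sh finishes, the injected Linkerd sidecars can be verified with the CLI (a sketch; it assumes the linkerd CLI is installed and the components run in the default "tfs" namespace):

# Check that every pod in the namespace carries a healthy linkerd-proxy
linkerd check --proxy -n tfs
# List containers per pod to confirm the proxy sidecar was injected
kubectl -n tfs get pods -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.containers[*].name}{"\n"}{end}'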
......@@ -335,7 +345,7 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
# Exposed through the ingress controller "tfs-ingress"
GRAFANA_URL="127.0.0.1:${EXT_HTTP_PORT}/grafana"
# Default Grafana credentials
# Default Grafana credentials when installed with the `monitoring` addon
GRAFANA_USERNAME="admin"
GRAFANA_PASSWORD="admin"
......@@ -412,25 +422,84 @@ if [[ "$TFS_COMPONENTS" == *"webui"* ]]; then
},
"secureJsonData": {"password": "'${QDB_PASSWORD}'"}
}' ${GRAFANA_URL_UPDATED}/api/datasources
echo
# Add the data source of the metrics collection framework (Prometheus)
curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
"access" : "proxy",
"type" : "prometheus",
"name" : "prometheus",
"url" : "http://prometheus-k8s.monitoring.svc:9090",
"basicAuth": false,
"isDefault": false,
"jsonData" : {
"httpMethod" : "POST"
}
}' ${GRAFANA_URL_UPDATED}/api/datasources
printf "\n\n"
echo ">> Creating dashboards..."
echo ">> Creating and staring dashboards..."
# Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
# Dashboard: L3 Monitoring KPIs
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
# Dashboard: Slice Grouping
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
printf "\n\n"
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
echo ">> Staring dashboards..."
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
# Dashboard: Component RPCs
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_component_rpc.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-comp-rpc"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
# Dashboard: Device Drivers
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_driver.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-drv"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
# Dashboard: Service Handlers
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_service_handler.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-svc-hdlr"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
# Dashboard: Device ConfigureDevice Details
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_device_config_exec_details.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-dev-confdev"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
# Dashboard: Load Generator Status
curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_prom_load_generator.json' \
${GRAFANA_URL_UPDATED}/api/dashboards/db
echo
DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-loadgen-stats"
DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
echo
......
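The registrations above can be double-checked against the Grafana HTTP API once the script finishes (a minimal sketch; it reuses the GRAFANA_URL_UPDATED variable built earlier in the script):

# List the configured data sources; the "prometheus" entry added above should appear
curl -s "${GRAFANA_URL_UPDATED}/api/datasources" | jq '.[].name'
# Confirm one of the imported dashboards resolves by its UID
curl -s "${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit" | jq '.dashboard.title'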
# Collect CAD metrics CSVs from the l3-centralizedattackdetectorservice pods.
folder_name="cad_exp_1_results"
if [ -d "$folder_name" ]; then
    echo "Folder '$folder_name' already exists. Emptying it..."
    rm -rf "$folder_name"/*
else
    echo "Creating folder '$folder_name'..."
    mkdir "$folder_name"
fi
# Periodically copy the metrics CSV out of every matching pod.
while true; do
    list=($(kubectl get pods --namespace tfs | grep l3-centralized | awk '{print $1}'))
    #kubectl -n "tfs" cp $pod_name:exp_1.csv $folder_name/$pod_name.csv -c server
    echo "These are the pods for now:"
    for item in "${list[@]}"; do
        echo "$item"
        kubectl -n "tfs" cp "$item:cad_metrics.csv" "$folder_name/$item.csv" -c server
    done
    sleep 2
done
# kubectl get pods --namespace tfs | grep l3-centralized | wc -l
# kubectl --namespace tfs get all | grep autoscaling/l3-centralizedattackdetectorservice-hpa | awk '{print $3}'
\ No newline at end of file
../src/automation/target/kubernetes/kubernetes.yml
\ No newline at end of file
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: Service
metadata:
annotations:
app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
labels:
app.kubernetes.io/name: automationservice
app: automationservice
name: automationservice
spec:
ports:
- name: grpc
port: 5050
targetPort: 5050
- name: metrics
protocol: TCP
port: 9192
targetPort: 8080
selector:
app.kubernetes.io/name: automationservice
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
labels:
app: automationservice
app.kubernetes.io/name: automationservice
name: automationservice
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: automationservice
template:
metadata:
annotations:
app.quarkus.io/build-timestamp: 2022-09-19 - 10:48:18 +0000
labels:
app: automationservice
app.kubernetes.io/name: automationservice
spec:
containers:
- env:
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONTEXT_SERVICE_HOST
value: contextservice
- name: DEVICE_SERVICE_HOST
value: deviceservice
image: labs.etsi.org:5050/tfs/controller/automation:0.2.0
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
httpGet:
path: /q/health/live
port: 8080
scheme: HTTP
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
name: automationservice
ports:
- containerPort: 5050
name: grpc
protocol: TCP
- containerPort: 8080
name: metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /q/health/ready
port: 8080
scheme: HTTP
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
resources:
requests:
cpu: 50m
memory: 512Mi
limits:
cpu: 500m
memory: 2048Mi
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: automationservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: automationservice
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
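Once deployed, the autoscaler behaviour (current CPU utilization versus the 80% target, and the replica count between 1 and 10) can be observed with kubectl (a sketch; it assumes the service runs in the default "tfs" namespace):

# Watch the HPA scale the automationservice deployment
kubectl --namespace tfs get hpa automationservice-hpa --watch
kubectl --namespace tfs describe hpa automationservice-hpa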
# Copyright 2022 The Cockroach Authors
# Copyright 2023 The Cockroach Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -23,7 +23,7 @@ spec:
serviceAccountName: cockroachdb-sa
containers:
- name: cockroachdb-client-secure
image: cockroachdb/cockroach:v22.2.0
image: cockroachdb/cockroach:v22.2.8
imagePullPolicy: IfNotPresent
volumeMounts:
- name: client-certs
......
# Copyright 2022 The Cockroach Authors
# Copyright 2023 The Cockroach Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -33,16 +33,16 @@ spec:
resources:
requests:
# This is intentionally low to make it work on local k3d clusters.
cpu: 100m
memory: 1Gi
limits:
cpu: 1
cpu: 4
memory: 4Gi
limits:
cpu: 8
memory: 8Gi
tlsEnabled: true
# You can set either a version of the db or a specific image name
# cockroachDBVersion: v22.2.0
# cockroachDBVersion: v22.2.8
image:
name: cockroachdb/cockroach:v22.2.0
name: cockroachdb/cockroach:v22.2.8
# nodes refers to the number of crdb pods that are created
# via the statefulset
nodes: 3
......
# Copyright 2022 The Cockroach Authors
# Copyright 2023 The Cockroach Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -538,8 +538,34 @@ spec:
value: cockroachdb/cockroach:v22.1.11
- name: RELATED_IMAGE_COCKROACH_v22_1_12
value: cockroachdb/cockroach:v22.1.12
- name: RELATED_IMAGE_COCKROACH_v22_1_13
value: cockroachdb/cockroach:v22.1.13
- name: RELATED_IMAGE_COCKROACH_v22_1_14
value: cockroachdb/cockroach:v22.1.14
- name: RELATED_IMAGE_COCKROACH_v22_1_15
value: cockroachdb/cockroach:v22.1.15
- name: RELATED_IMAGE_COCKROACH_v22_1_16
value: cockroachdb/cockroach:v22.1.16
- name: RELATED_IMAGE_COCKROACH_v22_1_18
value: cockroachdb/cockroach:v22.1.18
- name: RELATED_IMAGE_COCKROACH_v22_2_0
value: cockroachdb/cockroach:v22.2.0
- name: RELATED_IMAGE_COCKROACH_v22_2_1
value: cockroachdb/cockroach:v22.2.1
- name: RELATED_IMAGE_COCKROACH_v22_2_2
value: cockroachdb/cockroach:v22.2.2
- name: RELATED_IMAGE_COCKROACH_v22_2_3
value: cockroachdb/cockroach:v22.2.3
- name: RELATED_IMAGE_COCKROACH_v22_2_4
value: cockroachdb/cockroach:v22.2.4
- name: RELATED_IMAGE_COCKROACH_v22_2_5
value: cockroachdb/cockroach:v22.2.5
- name: RELATED_IMAGE_COCKROACH_v22_2_6
value: cockroachdb/cockroach:v22.2.6
- name: RELATED_IMAGE_COCKROACH_v22_2_7
value: cockroachdb/cockroach:v22.2.7
- name: RELATED_IMAGE_COCKROACH_v22_2_8
value: cockroachdb/cockroach:v22.2.8
- name: OPERATOR_NAME
value: cockroachdb
- name: WATCH_NAMESPACE
......@@ -552,7 +578,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: cockroachdb/cockroach-operator:v2.9.0
image: cockroachdb/cockroach-operator:v2.10.0
imagePullPolicy: IfNotPresent
name: cockroach-operator
resources:
......
# Copyright 2022 The Cockroach Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: cockroach-operator
name: cockroach-operator-system
......@@ -61,6 +61,7 @@ spec:
containers:
- name: cockroachdb
image: cockroachdb/cockroach:latest-v22.2
imagePullPolicy: Always
args:
- start-single-node
ports:
......
......@@ -20,6 +20,7 @@ spec:
selector:
matchLabels:
app: computeservice
replicas: 1
template:
metadata:
labels:
......@@ -33,6 +34,7 @@ spec:
ports:
- containerPort: 8080
- containerPort: 9090
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
......@@ -44,16 +46,18 @@ spec:
command: ["/bin/grpc_health_probe", "-addr=:9090"]
resources:
requests:
cpu: 250m
memory: 512Mi
cpu: 50m
memory: 64Mi
limits:
cpu: 700m
memory: 1024Mi
cpu: 500m
memory: 512Mi
---
apiVersion: v1
kind: Service
metadata:
name: computeservice
labels:
app: computeservice
spec:
type: ClusterIP
selector:
......@@ -67,3 +71,7 @@ spec:
protocol: TCP
port: 9090
targetPort: 9090
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
......@@ -20,9 +20,11 @@ spec:
selector:
matchLabels:
app: contextservice
replicas: 1
#replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "4222"
labels:
app: contextservice
spec:
......@@ -52,11 +54,11 @@ spec:
command: ["/bin/grpc_health_probe", "-addr=:1010"]
resources:
requests:
cpu: 50m
memory: 64Mi
cpu: 250m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......@@ -77,3 +79,25 @@ spec:
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: contextservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: contextservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -45,11 +45,11 @@ spec:
command: ["/bin/grpc_health_probe", "-addr=:2020"]
resources:
requests:
cpu: 50m
memory: 64Mi
cpu: 250m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
......
......@@ -33,6 +33,7 @@ spec:
imagePullPolicy: Always
ports:
- containerPort: 50052
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
......@@ -44,11 +45,11 @@ spec:
command: ["/bin/grpc_health_probe", "-addr=:50052"]
resources:
requests:
cpu: 50m
cpu: 256m
memory: 64Mi
limits:
cpu: 500m
memory: 512Mi
cpu: 512m
memory: 128Mi
---
apiVersion: v1
kind: Service
......@@ -65,3 +66,7 @@ spec:
protocol: TCP
port: 50052
targetPort: 50052
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
......@@ -20,7 +20,7 @@ spec:
selector:
matchLabels:
app: pathcompservice
replicas: 1
#replicas: 1
template:
metadata:
labels:
......@@ -53,6 +53,8 @@ spec:
- name: backend
image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest
imagePullPolicy: Always
ports:
- containerPort: 8081
#readinessProbe:
# httpGet:
# path: /health
......@@ -96,3 +98,25 @@ spec:
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: pathcompservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: pathcompservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
../src/policy/target/kubernetes/kubernetes.yml
\ No newline at end of file