
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: tfs/controller
Commits on Source (389)
Showing with 580 additions and 140 deletions
......@@ -25,12 +25,14 @@ share/python-wheels/
.installed.cfg
*.egg
MANIFEST
.my_venv/
# requirements.txt # removed to enable tracking versions of packages over time
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
.manifest/
*.spec
# Installer logs
......
......@@ -34,6 +34,7 @@ include:
- local: '/src/opticalcontroller/.gitlab-ci.yml'
- local: '/src/ztp/.gitlab-ci.yml'
- local: '/src/policy/.gitlab-ci.yml'
- local: '/src/automation/.gitlab-ci.yml'
- local: '/src/forecaster/.gitlab-ci.yml'
#- local: '/src/webui/.gitlab-ci.yml'
#- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml'
......@@ -49,5 +50,10 @@ include:
- local: '/src/kpi_value_api/.gitlab-ci.yml'
- local: '/src/kpi_value_writer/.gitlab-ci.yml'
- local: '/src/telemetry/.gitlab-ci.yml'
- local: '/src/analytics/.gitlab-ci.yml'
- local: '/src/qos_profile/.gitlab-ci.yml'
- local: '/src/vnt_manager/.gitlab-ci.yml'
- local: '/src/e2e_orchestrator/.gitlab-ci.yml'
# This should be last one: end-to-end integration tests
- local: '/src/tests/.gitlab-ci.yml'
......@@ -154,8 +154,8 @@ function crdb_undeploy_single() {
echo
}
function crdb_drop_database_single() {
echo "Drop database if exists"
function crdb_drop_databases_single() {
echo "Drop TFS databases, if exist"
if [[ -z "${GITLAB_CI}" ]]; then
#kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o yaml
......@@ -168,9 +168,21 @@ function crdb_drop_database_single() {
CRDB_CLIENT_URL="postgresql://${CRDB_USERNAME}:${CRDB_PASSWORD}@${CRDB_HOST}:${CRDB_PORT}/defaultdb?sslmode=require"
echo "CRDB_CLIENT_URL=${CRDB_CLIENT_URL}"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
DATABASES=$(
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute "SHOW DATABASES;" --format=tsv | awk '{print $1}' | grep "^tfs"
)
echo "Found TFS databases: ${DATABASES}" | tr '\n' ' '
echo
for DB_NAME in $DATABASES; do
echo "Dropping TFS database: $DB_NAME"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute="DROP DATABASE IF EXISTS $DB_NAME CASCADE;"
done
echo
}
......@@ -349,11 +361,23 @@ function crdb_undeploy_cluster() {
echo
}
function crdb_drop_database_cluster() {
echo "Drop database if exists"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public --execute \
"DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
function crdb_drop_databases_cluster() {
echo "Drop TFS databases, if exist"
DATABASES=$(
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public \
--execute "SHOW DATABASES;" --format=tsv | awk '{print $1}' | grep "^tfs"
)
echo "Found TFS databases: ${DATABASES}" | tr '\n' ' '
echo
for DB_NAME in $DATABASES; do
echo "Dropping TFS database: $DB_NAME"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-client-secure -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public \
--execute="DROP DATABASE IF EXISTS $DB_NAME CASCADE;"
done
echo
}
......@@ -365,7 +389,7 @@ if [ "$CRDB_DEPLOY_MODE" == "single" ]; then
crdb_deploy_single
if [ "$CRDB_DROP_DATABASE_IF_EXISTS" == "YES" ]; then
crdb_drop_database_single
crdb_drop_databases_single
fi
elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
if [ "$CRDB_REDEPLOY" == "YES" ]; then
......@@ -375,7 +399,7 @@ elif [ "$CRDB_DEPLOY_MODE" == "cluster" ]; then
crdb_deploy_cluster
if [ "$CRDB_DROP_DATABASE_IF_EXISTS" == "YES" ]; then
crdb_drop_database_cluster
crdb_drop_databases_cluster
fi
else
echo "Unsupported value: CRDB_DEPLOY_MODE=$CRDB_DEPLOY_MODE"
......
......@@ -47,10 +47,10 @@ function kafka_deploy() {
cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}"
# echo "Apache Kafka Namespace"
echo ">>> Delete Apache Kafka Namespace"
echo "Delete Apache Kafka Namespace"
kubectl delete namespace ${KFK_NAMESPACE} --ignore-not-found
echo ">>> Create Apache Kafka Namespace"
echo "Create Apache Kafka Namespace"
kubectl create namespace ${KFK_NAMESPACE}
# echo ">>> Deplying Apache Kafka Zookeeper"
......@@ -76,15 +76,15 @@ function kafka_deploy() {
# fi
}
echo "Apache Kafka"
echo ">>> Checking if Apache Kafka is deployed ... "
echo ">>> Apache Kafka"
echo "Checking if Apache Kafka is deployed ... "
if [ "$KFK_REDEPLOY" == "YES" ]; then
echo ">>> Redeploying kafka namespace"
echo "Redeploying kafka namespace"
kafka_deploy
elif kubectl get namespace "${KFK_NAMESPACE}" &> /dev/null; then
echo ">>> Apache Kafka already present; skipping step."
echo "Apache Kafka already present; skipping step."
else
echo ">>> Kafka namespace doesn't exists. Deploying kafka namespace"
echo "Kafka namespace doesn't exists. Deploying kafka namespace"
kafka_deploy
fi
echo
......@@ -146,55 +146,17 @@ kubectl create namespace $TFS_K8S_NAMESPACE
sleep 2
printf "\n"
echo "Create secret with CockroachDB data"
echo ">>> Create Secret with CockroachDB data..."
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_CONTEXT=${CRDB_DATABASE} # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_CONTEXT} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for KPI Management microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_KPI_MGMT="tfs_kpi_mgmt" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_KPI_MGMT} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for Telemetry microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_TELEMETRY="tfs_telemetry" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-telemetry --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_TELEMETRY} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with CockroachDB data for Analytics microservices"
CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
CRDB_DATABASE_ANALYTICS="tfs_analytics" # TODO: change by specific configurable environment variable
kubectl create secret generic crdb-analytics --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
--from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
--from-literal=CRDB_DATABASE=${CRDB_DATABASE_ANALYTICS} \
--from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
--from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
--from-literal=CRDB_SSLMODE=require
printf "\n"
echo "Create secret with Apache Kafka data for KPI, Telemetry and Analytics microservices"
echo ">>> Create Secret with Apache Kakfa..."
KFK_SERVER_PORT=$(kubectl --namespace ${KFK_NAMESPACE} get service kafka-service -o 'jsonpath={.spec.ports[0].port}')
kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
--from-literal=KFK_NAMESPACE=${KFK_NAMESPACE} \
......@@ -382,11 +344,10 @@ for COMPONENT in $TFS_COMPONENTS; do
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
else
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
if [ "$TFS_SKIP_BUILD" != "YES" ]; then
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
else
VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$VERSION" | sed 's,//,/,g' | sed 's,http:/,,g')
fi
sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
......
src/tests/ecoc24/
\ No newline at end of file
......@@ -37,9 +37,13 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_analytics"
- name: METRICS_PORT
value: "9192"
envFrom:
- secretRef:
name: crdb-analytics
name: crdb-data
- secretRef:
name: kfk-kpi-data
readinessProbe:
......@@ -60,10 +64,12 @@ spec:
imagePullPolicy: Always
ports:
- containerPort: 30090
- containerPort: 9192
- containerPort: 9193
env:
- name: LOG_LEVEL
value: "INFO"
- name: METRICS_PORT
value: "9193"
envFrom:
- secretRef:
name: kfk-kpi-data
......@@ -92,18 +98,22 @@ spec:
selector:
app: analyticsservice
ports:
- name: frontend-grpc
- name: grpc
protocol: TCP
port: 30080
targetPort: 30080
- name: backend-grpc
- name: grpc-backend
protocol: TCP
port: 30090
targetPort: 30090
- name: metrics
- name: metrics-frontend
protocol: TCP
port: 9192
targetPort: 9192
- name: metrics-backend
protocol: TCP
port: 9193
targetPort: 9193
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......
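The analytics deployment above (and the telemetry deployment further below) now passes an explicit METRICS_PORT to each container (9192 for the frontend, 9193 for the backend) and publishes them through the renamed metrics-frontend and metrics-backend service ports that the new ServiceMonitors scrape. A minimal sketch of how a component could honour that variable, assuming the standard prometheus_client library (the actual TFS metrics wiring is not shown in this comparison):

# Hedged sketch: serve Prometheus metrics on the port injected via METRICS_PORT.
import os
from prometheus_client import Counter, start_http_server

REQUESTS = Counter("requests_total", "Requests handled by this component")

def serve_metrics():
    # 9192 for the frontend container, 9193 for the backend, per the manifests above
    port = int(os.environ.get("METRICS_PORT", "9192"))
    start_http_server(port)  # exposes /metrics over HTTP, matching the ServiceMonitor scrape path
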
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: automationservice
spec:
selector:
matchLabels:
app: automationservice
replicas: 1
template:
metadata:
annotations:
# Required for IETF L2VPN SBI when both parent and child run in same K8s cluster with Linkerd
config.linkerd.io/skip-outbound-ports: "2002"
labels:
app: automationservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/automation:latest
imagePullPolicy: Always
ports:
- containerPort: 30200
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
startupProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30200"]
failureThreshold: 30
periodSeconds: 1
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30200"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30200"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: automationservice
labels:
app: automationservice
spec:
type: ClusterIP
selector:
app: automationservice
ports:
- name: grpc
protocol: TCP
port: 30200
targetPort: 30200
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
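The automation service above is newly added and exposes only gRPC (30200) and metrics (9192); its probes rely on /bin/grpc_health_probe as shown in the manifest. A quick in-cluster reachability sketch, assuming the ClusterIP name automationservice resolves from the calling pod:

# Hedged sketch: check that the new automationservice gRPC endpoint answers.
import grpc

channel = grpc.insecure_channel("automationservice:30200")  # ClusterIP service name and gRPC port above
try:
    grpc.channel_ready_future(channel).result(timeout=5)  # raises grpc.FutureTimeoutError if unreachable
    print("automationservice gRPC endpoint is reachable")
finally:
    channel.close()
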
......@@ -45,6 +45,8 @@ spec:
value: "FALSE"
- name: ALLOW_EXPLICIT_ADD_LINK_TO_TOPOLOGY
value: "FALSE"
- name: CRDB_DATABASE
value: "tfs_context"
envFrom:
- secretRef:
name: crdb-data
......
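The context service above, like the KPI, telemetry, analytics and QoS-profile services elsewhere in this comparison, now receives its database name through a per-deployment CRDB_DATABASE variable (tfs_context here) while sharing the single crdb-data secret for namespace, port, credentials and SSL mode. A minimal sketch of how a component could assemble its connection URI from those variables; the cockroachdb-public host name and the URI scheme are assumptions, not code from this change:

# Hedged sketch: build a CockroachDB URI from the shared crdb-data secret plus the per-deployment CRDB_DATABASE.
import os

def crdb_uri() -> str:
    host = "cockroachdb-public.{}.svc.cluster.local".format(os.environ["CRDB_NAMESPACE"])  # assumed service name
    return "cockroachdb://{}:{}@{}:{}/{}?sslmode={}".format(
        os.environ["CRDB_USERNAME"], os.environ["CRDB_PASSWORD"], host,
        os.environ["CRDB_SQL_PORT"],
        os.environ["CRDB_DATABASE"],  # e.g. "tfs_context" for the Context service
        os.environ.get("CRDB_SSLMODE", "require"),
    )
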
......@@ -20,8 +20,12 @@ spec:
selector:
matchLabels:
app: e2e-orchestratorservice
replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "8761"
config.linkerd.io/skip-inbound-ports: "8761"
labels:
app: e2e-orchestratorservice
spec:
......@@ -33,9 +37,18 @@ spec:
ports:
- containerPort: 10050
- containerPort: 9192
- containerPort: 8761
env:
- name: LOG_LEVEL
value: "INFO"
- name: WS_IP_HOST
value: "nbiservice.tfs-ip.svc.cluster.local"
- name: WS_IP_PORT
value: "8761"
- name: WS_E2E_HOST
value: "e2e-orchestratorservice.tfs-e2e.svc.cluster.local"
- name: WS_E2E_PORT
value: "8762"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10050"]
......@@ -67,25 +80,6 @@ spec:
- name: metrics
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: e2e-orchestratorservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: e2e-orchestratorservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
- name: ws
port: 8761
targetPort: 8761
......@@ -39,9 +39,11 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_kpi_mgmt"
envFrom:
- secretRef:
name: crdb-kpi-data
name: crdb-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:30010"]
......
......@@ -23,6 +23,9 @@ spec:
replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-inbound-ports: "8762"
config.linkerd.io/skip-outbound-ports: "8762"
labels:
app: nbiservice
spec:
......@@ -35,9 +38,14 @@ spec:
- containerPort: 8080
- containerPort: 9090
- containerPort: 9192
- containerPort: 8762
env:
- name: LOG_LEVEL
value: "INFO"
- name: IETF_NETWORK_RENDERER
value: "LIBYANG"
- name: WS_E2E_PORT
value: "8762"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:9090"]
......@@ -75,3 +83,7 @@ spec:
protocol: TCP
port: 9192
targetPort: 9192
- name: ws
protocol: TCP
port: 8762
targetPort: 8762
......@@ -66,6 +66,6 @@ spec:
pathType: Prefix
backend:
service:
name: qkd-appservice
name: nbiservice
port:
number: 8005
number: 8080
......@@ -28,36 +28,35 @@ spec:
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/qkd_app:latest
imagePullPolicy: Always
ports:
- containerPort: 10060
- containerPort: 9192
- containerPort: 8005
env:
- name: LOG_LEVEL
value: "DEBUG"
- name: CRDB_DATABASE_APP
value: "qkd_app"
envFrom:
- secretRef:
name: crdb-data
- secretRef:
name: nats-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10060"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10060"]
resources:
requests:
cpu: 150m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
- name: server
image: labs.etsi.org:5050/tfs/controller/qkd_app:latest
imagePullPolicy: Always
ports:
- containerPort: 10060
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "qkd_app"
envFrom:
- secretRef:
name: crdb-data
- secretRef:
name: nats-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10060"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10060"]
resources:
requests:
cpu: 150m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
---
apiVersion: v1
kind: Service
......@@ -70,14 +69,11 @@ spec:
selector:
app: qkd-appservice
ports:
- name: grpc
protocol: TCP
port: 10060
targetPort: 10060
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
- name: http
port: 8005
targetPort: 8005
- name: grpc
protocol: TCP
port: 10060
targetPort: 10060
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: qos-profileservice
spec:
selector:
matchLabels:
app: qos-profileservice
#replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "4222"
labels:
app: qos-profileservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/qos_profile:latest
imagePullPolicy: Always
ports:
- containerPort: 20040
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_qos_profile"
envFrom:
- secretRef:
name: crdb-data
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:20040"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:20040"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: qos-profileservice
labels:
app: qos-profileservice
spec:
type: ClusterIP
selector:
app: qos-profileservice
ports:
- name: grpc
protocol: TCP
port: 20040
targetPort: 20040
- name: metrics
protocol: TCP
port: 9192
targetPort: 9192
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: qos-profileservice-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: qos-profileservice
minReplicas: 1
maxReplicas: 20
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
#behavior:
# scaleDown:
# stabilizationWindowSeconds: 30
......@@ -475,3 +475,156 @@ spec:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-analyticsservice-metric
labels:
app: analyticsservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: analyticsservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics-frontend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
- port: metrics-backend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-telemetryservice-metric
labels:
app: telemetryservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: telemetryservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics-frontend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
- port: metrics-backend # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-kpi-managerservice-metric
labels:
app: kpi-managerservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: kpi-managerservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-kpi_value_apiservice-metric
labels:
app: kpi_value_apiservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: kpi_value_apiservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
namespace: monitoring # namespace where prometheus is running
name: tfs-kpi_value_writerservice-metric
labels:
app: kpi_value_writerservice
#release: prometheus
#release: prom # name of the release
# ( VERY IMPORTANT: You need to know the correct release name by viewing
# the servicemonitor of Prometheus itself: Without the correct name,
# Prometheus cannot identify the metrics of the Flask app as the target.)
spec:
selector:
matchLabels:
# Target app service
#namespace: tfs
app: kpi_value_writerservice # same as above
#release: prometheus # same as above
endpoints:
- port: metrics # named port in target app
scheme: http
path: /metrics # path to scrape
interval: 5s # scrape interval
namespaceSelector:
any: false
matchNames:
- tfs # namespace where the app is running
......@@ -37,9 +37,13 @@ spec:
env:
- name: LOG_LEVEL
value: "INFO"
- name: CRDB_DATABASE
value: "tfs_telemetry"
- name: METRICS_PORT
value: "9192"
envFrom:
- secretRef:
name: crdb-telemetry
name: crdb-data
- secretRef:
name: kfk-kpi-data
readinessProbe:
......@@ -60,10 +64,12 @@ spec:
imagePullPolicy: Always
ports:
- containerPort: 30060
- containerPort: 9192
- containerPort: 9193
env:
- name: LOG_LEVEL
value: "INFO"
- name: METRICS_PORT
value: "9193"
envFrom:
- secretRef:
name: kfk-kpi-data
......@@ -92,18 +98,22 @@ spec:
selector:
app: telemetryservice
ports:
- name: frontend-grpc
- name: grpc
protocol: TCP
port: 30050
targetPort: 30050
- name: backend-grpc
- name: grpc-backend
protocol: TCP
port: 30060
targetPort: 30060
- name: metrics
- name: metrics-frontend
protocol: TCP
port: 9192
targetPort: 9192
- name: metrics-backend
protocol: TCP
port: 9193
targetPort: 9193
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
......
# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: Deployment
metadata:
name: vnt-managerservice
spec:
selector:
matchLabels:
app: vnt-managerservice
replicas: 1
template:
metadata:
annotations:
config.linkerd.io/skip-outbound-ports: "8765"
config.linkerd.io/skip-inbound-ports: "8765"
labels:
app: vnt-managerservice
spec:
terminationGracePeriodSeconds: 5
containers:
- name: server
image: labs.etsi.org:5050/tfs/controller/vnt_manager:latest
imagePullPolicy: Always
ports:
- containerPort: 10080
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
- name: WS_IP_PORT
value: "8761"
- name: WS_E2E_PORT
value: "8762"
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10080"]
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:10080"]
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 1000m
memory: 1024Mi
---
apiVersion: v1
kind: Service
metadata:
name: vnt-managerservice
labels:
app: vnt-managerservice
spec:
type: ClusterIP
selector:
app: vnt-managerservice
ports:
- name: grpc
port: 10080
targetPort: 10080
- name: metrics
port: 9192
targetPort: 9192
......@@ -20,13 +20,16 @@
export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
# Set the list of components, separated by spaces, you want to build images for, and deploy.
export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
export TFS_COMPONENTS="context device pathcomp service slice nbi webui"
# Uncomment to activate Monitoring (old)
#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api"
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
# Uncomment to activate QoS Profiles
#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
......@@ -62,6 +65,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene
# Uncomment to activate E2E Orchestrator
#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator"
# Uncomment to activate VNT Manager
#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager"
# Uncomment to activate DLT and Interdomain
#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt"
#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then
......@@ -80,6 +86,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene
# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}"
#fi
# Uncomment to activate Load Generator
#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator"
# Set the tag you want to use for your images.
export TFS_IMAGE_TAG="dev"
......
......@@ -20,7 +20,7 @@ import "kpi_manager.proto";
//import "kpi_sample_types.proto";
service AnalyticsFrontendService {
rpc StartAnalyzer (Analyzer ) returns (AnalyzerId ) {}
rpc StartAnalyzer (Analyzer ) returns (AnalyzerId) {}
rpc StopAnalyzer (AnalyzerId ) returns (context.Empty) {}
rpc SelectAnalyzers(AnalyzerFilter) returns (AnalyzerList ) {}
}
......
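For reference, a hypothetical Python client for the AnalyticsFrontendService defined above; the generated module names and the empty Analyzer payload are illustrative assumptions, and the frontend gRPC port (30080) comes from the analytics Service manifest earlier in this comparison:

# Hypothetical client sketch for AnalyticsFrontendService (module names assumed from typical protoc output).
import grpc
import analytics_frontend_pb2 as pb2
import analytics_frontend_pb2_grpc as pb2_grpc

channel = grpc.insecure_channel("analyticsservice:30080")  # frontend gRPC port from the Service above
stub = pb2_grpc.AnalyticsFrontendServiceStub(channel)
analyzer_id = stub.StartAnalyzer(pb2.Analyzer())  # returns an AnalyzerId
stub.StopAnalyzer(analyzer_id)  # returns context.Empty
channel.close()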