diff --git a/manifests/analyticsservice.yaml b/manifests/analyticsservice.yaml
index f014fe2dd29eedd4cb8758c253319b93df81ae36..61666ead951c73e4034110b00a51743d33bd4ce2 100644
--- a/manifests/analyticsservice.yaml
+++ b/manifests/analyticsservice.yaml
@@ -90,7 +90,7 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: analytics-frontendservice
+  name: analyticsservice
   labels:
     app: analyticsservice
 spec:
diff --git a/manifests/automationservice.yaml b/manifests/automationservice.yaml
index e280f479b1a7300cd88848860431644e46075cf1..f6c97f7fb7635e4b04431d41dbf20ca3edc51475 100644
--- a/manifests/automationservice.yaml
+++ b/manifests/automationservice.yaml
@@ -40,11 +40,6 @@ spec:
           env:
             - name: LOG_LEVEL
               value: "INFO"
-          envFrom:
-#            - secretRef:
-#                name: crdb-analytics
-            - secretRef:
-                name: kfk-kpi-data
           startupProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:30200"]
diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml
index 04141b8ddb2963dd7455599386733b9cea4581be..cd35d2698816bcfc5bc2030506eb2897a85708f6 100644
--- a/manifests/telemetryservice.yaml
+++ b/manifests/telemetryservice.yaml
@@ -90,7 +90,7 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: telemetry-frontendservice
+  name: telemetryservice
   labels:
     app: telemetryservice
 spec:
diff --git a/my_deploy.sh b/my_deploy.sh
index 10a262c6e942de48c1265f7b6c8e72fe127a9de4..d6ed6259ae0299f112d62f62833badc8fd09eb13 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -20,13 +20,13 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator automation"
+export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator"
 
 # Uncomment to activate Monitoring (old)
 #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
 
 # Uncomment to activate Monitoring Framework (new)
-export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics"
+#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
 
 # Uncomment to activate BGP-LS Speaker
 #export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
@@ -42,10 +42,10 @@ export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_
 #fi
 
 # Uncomment to activate ZTP
-export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
+#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp"
 
 # Uncomment to activate Policy Manager
-export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
+#export TFS_COMPONENTS="${TFS_COMPONENTS} policy"
 
 # Uncomment to activate Optical CyberSecurity
 #export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
@@ -86,7 +86,7 @@ export TFS_IMAGE_TAG="dev"
 
 # Set the name of the Kubernetes namespace to deploy TFS to.
 export TFS_K8S_NAMESPACE="tfs"
-:
+
 
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
diff --git a/proto/automation.proto b/proto/automation.proto
index c482dc0dad4d0768eac79c16ff95cb334f42dd65..edb1ef404171270e740e2b30944151d98607292c 100644
--- a/proto/automation.proto
+++ b/proto/automation.proto
@@ -20,11 +20,11 @@ import "policy.proto";
 
 // Automation service RPCs
 service AutomationService {
-  rpc ZSMCreate       (ZSMCreateRequest) returns (ZSMService) {}
-  rpc ZSMUpdate       (ZSMCreateUpdate) returns (ZSMService) {}
-  rpc ZSMDelete       (ZSMServiceID) returns (ZSMServiceState) {}
-  rpc ZSMGetById      (ZSMServiceID) returns (ZSMService) {}
-  rpc ZSMGetByService (context.ServiceId) returns (ZSMService) {}
+  rpc ZSMCreate       (ZSMCreateRequest ) returns (ZSMService     ) {}
+  rpc ZSMUpdate       (ZSMCreateUpdate  ) returns (ZSMService     ) {}
+  rpc ZSMDelete       (ZSMServiceID     ) returns (ZSMServiceState) {}
+  rpc ZSMGetById      (ZSMServiceID     ) returns (ZSMService     ) {}
+  rpc ZSMGetByService (context.ServiceId) returns (ZSMService     ) {}
 }
 
 // ZSM service states
diff --git a/scripts/run_tests_locally-analytics-DB.sh b/scripts/run_tests_locally-analytics-DB.sh
index a31ab81cee3ab71b70d04f997df0ef9ebc737697..3efc8f97177ee821d7c4c1ce19ccb0a0f8731cce 100755
--- a/scripts/run_tests_locally-analytics-DB.sh
+++ b/scripts/run_tests_locally-analytics-DB.sh
@@ -19,6 +19,6 @@ PROJECTDIR=`pwd`
 cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
-export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs-analyzer?sslmode=require"
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require"
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     analytics/tests/test_analytics_db.py
diff --git a/scripts/run_tests_locally-analytics-backend.sh b/scripts/run_tests_locally-analytics-backend.sh
index 722779824a8c91b5d7cff0337c10c4ea1327a1b6..f2e0120278f07f01e41ccd90357a5ee863cb5698 100755
--- a/scripts/run_tests_locally-analytics-backend.sh
+++ b/scripts/run_tests_locally-analytics-backend.sh
@@ -20,6 +20,6 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 export KFK_SERVER_ADDRESS='127.0.0.1:9092'
 CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
-export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require"
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     analytics/backend/tests/test_backend.py
diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh
index e74eb4ec198d77688f62004931c69eac31e60f0c..ea04e0323b79c3308e279add86421bdc2d1a46bd 100755
--- a/scripts/run_tests_locally-analytics-frontend.sh
+++ b/scripts/run_tests_locally-analytics-frontend.sh
@@ -20,6 +20,6 @@ cd $PROJECTDIR/src
 RCFILE=$PROJECTDIR/coverage/.coveragerc
 export KFK_SERVER_ADDRESS='127.0.0.1:9092'
 CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}')
-export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require"
+export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require"
 python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \
     analytics/frontend/tests/test_frontend.py
diff --git a/src/analytics/backend/service/SparkStreaming.py b/src/analytics/backend/service/SparkStreaming.py
new file mode 100644
index 0000000000000000000000000000000000000000..f204c6247436177cd032c777c048ecb165051ec2
--- /dev/null
+++ b/src/analytics/backend/service/SparkStreaming.py
@@ -0,0 +1,154 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging, time
+from pyspark.sql import SparkSession
+from pyspark.sql.types import StructType, StructField, StringType, DoubleType, TimestampType
+from pyspark.sql.functions import from_json, col, window, avg, min, max, first, last, stddev, when, round
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+
+LOGGER = logging.getLogger(__name__)
+
+def DefiningSparkSession():
+    # Create a Spark session with a specific Spark version (3.5.0)
+    return SparkSession.builder \
+        .appName("Analytics") \
+        .config("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") \
+        .config("spark.jars.packages", "org.apache.spark:spark-sql-kafka-0-10_2.12:3.5.0") \
+        .getOrCreate()
+
+def SettingKafkaConsumerParams():   # TODO: create get_kafka_consumer() in common with inputs (bootstrap server, subscribe, startingOffset and failOnDataLoss with default values)
+    return {
+        # "kafka.bootstrap.servers": '127.0.0.1:9092',
+        "kafka.bootstrap.servers": KafkaConfig.get_kafka_address(),
+        "subscribe"       : KafkaTopic.VALUE.value,   # topic should have at least one message before the Spark session starts
+        "startingOffsets" : 'latest',
+        "failOnDataLoss"  : 'false'                   # Optional: Set to "true" to fail the query on data loss
+    }
+
+def DefiningRequestSchema():
+    return StructType([
+        StructField("time_stamp" , StringType() , True),
+        StructField("kpi_id"     , StringType() , True),
+        StructField("kpi_value"  , DoubleType() , True)
+    ])
+
+def GetAggregations(oper_list):
+    # Define the possible aggregation functions
+    agg_functions = {
+        'avg'  : round(avg   ("kpi_value"), 3) .alias("avg_value"),
+        'min'  : round(min   ("kpi_value"), 3) .alias("min_value"),
+        'max'  : round(max   ("kpi_value"), 3) .alias("max_value"),
+        'first': round(first ("kpi_value"), 3) .alias("first_value"),
+        'last' : round(last  ("kpi_value"), 3) .alias("last_value"),
+        'stdev': round(stddev("kpi_value"), 3) .alias("stdev_value")
+    }
+    return [agg_functions[op] for op in oper_list if op in agg_functions]   # Filter and return only the selected aggregations
+
+def ApplyThresholds(aggregated_df, thresholds):
+    # Apply thresholds (TH-Fail and TH-RAISE) based on the thresholds dictionary on the aggregated DataFrame.
+
+    # Loop through each column name and its associated thresholds
+    for col_name, (fail_th, raise_th) in thresholds.items():
+        # Apply TH-Fail condition (if column value is less than the fail threshold)
+        aggregated_df = aggregated_df.withColumn(
+            f"{col_name}_THRESHOLD_FALL",
+            when(col(col_name) < fail_th, True).otherwise(False)
+        )
+        # Apply TH-RAISE condition (if column value is greater than the raise threshold)
+        aggregated_df = aggregated_df.withColumn(
+            f"{col_name}_THRESHOLD_RAISE",
+            when(col(col_name) > raise_th, True).otherwise(False)
+        )
+    return aggregated_df
+
+def SparkStreamer(key, kpi_list, oper_list, thresholds, stop_event,
+                  window_size=None, win_slide_duration=None, time_stamp_col=None):
+    """
+    Method to perform Spark operations on a Kafka stream.
+    NOTE: The Kafka topic to be processed should have at least one row before initiating the Spark session.
+    """
+    kafka_consumer_params = SettingKafkaConsumerParams()   # Define the Kafka consumer parameters
+    schema                = DefiningRequestSchema()        # Define the schema for the incoming JSON data
+    spark                 = DefiningSparkSession()         # Define the Spark session with app name and Spark version
+
+    # extra options default assignment
+    if window_size        is None: window_size        = "60 seconds"   # default
+    if win_slide_duration is None: win_slide_duration = "30 seconds"   # default
+    if time_stamp_col     is None: time_stamp_col     = "time_stamp"   # default
+
+    try:
+        # Read data from Kafka
+        raw_stream_data = spark \
+            .readStream \
+            .format("kafka") \
+            .options(**kafka_consumer_params) \
+            .load()
+
+        # Convert the value column from Kafka to a string
+        stream_data = raw_stream_data.selectExpr("CAST(value AS STRING)")
+        # Parse the JSON string into a DataFrame with the defined schema
+        parsed_stream_data = stream_data.withColumn("parsed_value", from_json(col("value"), schema))
+        # Select the parsed fields
+        final_stream_data = parsed_stream_data.select("parsed_value.*")
+        # Convert the time_stamp to a proper timestamp (assuming it's in ISO format)
+        final_stream_data = final_stream_data.withColumn(time_stamp_col, col(time_stamp_col).cast(TimestampType()))
+        # Filter the stream to only include rows where the kpi_id is in the kpi_list
+        filtered_stream_data = final_stream_data.filter(col("kpi_id").isin(kpi_list))
+        # Define a window for aggregation
+        windowed_stream_data = filtered_stream_data \
+            .groupBy(
+                window( col(time_stamp_col),
+                        window_size, slideDuration=win_slide_duration
+                ),
+                col("kpi_id")
+            ) \
+            .agg(*GetAggregations(oper_list))
+        # Apply thresholds to the aggregated data
+        thresholded_stream_data = ApplyThresholds(windowed_stream_data, thresholds)
+
+        # --- This will write output on console: FOR TESTING PURPOSES
+        # Start the Spark streaming query
+        # query = thresholded_stream_data \
+        #     .writeStream \
+        #     .outputMode("update") \
+        #     .format("console")
+
+        # --- This will write output to Kafka: ACTUAL IMPLEMENTATION
+        query = thresholded_stream_data \
+            .selectExpr(f"CAST(kpi_id AS STRING) AS key", "to_json(struct(*)) AS value") \
+            .writeStream \
+            .format("kafka") \
+            .option("kafka.bootstrap.servers", KafkaConfig.get_kafka_address()) \
+            .option("topic",                   KafkaTopic.ALARMS.value) \
+            .option("checkpointLocation",      "analytics/.spark/checkpoint") \
+            .outputMode("update")
+
+        # Start the query execution
+        queryHandler = query.start()
+
+        # Loop to check for stop event flag. To be set by stop collector method.
+        while True:
+            if stop_event.is_set():
+                LOGGER.debug("Stop Event activated. Terminating in 5 seconds...")
+                print ("Stop Event activated. Terminating in 5 seconds...")
+                time.sleep(5)
+                queryHandler.stop()
+                break
+            time.sleep(5)
+
+    except Exception as e:
+        print("Error in Spark streaming process: {:}".format(e))
+        LOGGER.debug("Error in Spark streaming process: {:}".format(e))
diff --git a/src/analytics/backend/service/__main__.py b/src/analytics/backend/service/__main__.py
index 78a06a8c6c9aada6536d6b01147d8ec39d4f404c..3c4c36b7c7bd952164bf9e48a45e22fb00575564 100644
--- a/src/analytics/backend/service/__main__.py
+++ b/src/analytics/backend/service/__main__.py
@@ -37,8 +37,8 @@ def main():
     LOGGER.info('Starting...')
 
     # Start metrics server
-    # metrics_port = get_metrics_port()
-    # start_http_server(metrics_port)
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
 
     grpc_service = AnalyticsBackendService()
     grpc_service.start()
diff --git a/src/analytics/frontend/Dockerfile b/src/analytics/frontend/Dockerfile
index ac9b3fe9515c621d769b98d3c7609a3003bb42bd..10499713f318a23e1aeab49c96e8163a5ec147fa 100644
--- a/src/analytics/frontend/Dockerfile
+++ b/src/analytics/frontend/Dockerfile
@@ -62,9 +62,9 @@ RUN python3 -m pip install -r requirements.txt
 
 # Add component files into working directory
 WORKDIR /var/teraflow
-COPY ./src/analytics/__init__.py analytics/__init__.py
-COPY ./src/analytics/frontend/. analytics/frontend/
-COPY ./src/analytics/database/. analytics/database/
+COPY src/analytics/__init__.py analytics/__init__.py
+COPY src/analytics/frontend/. analytics/frontend/
+COPY src/analytics/database/. analytics/database/
 
 # Start the service
 ENTRYPOINT ["python", "-m", "analytics.frontend.service"]
diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py
index bdab55198575addaa07e1e16f0883ba558c033da..58bee3cd2fc0a77eec471c8f4beac9c45981776d 100644
--- a/src/analytics/frontend/tests/test_frontend.py
+++ b/src/analytics/frontend/tests/test_frontend.py
@@ -130,4 +130,4 @@ def test_StartAnalyzers(analyticsFrontend_client):
 #     class_obj = AnalyticsFrontendServiceServicerImpl()
 #     for response in class_obj.StartResponseListener(analyzer_id.analyzer_id.uuid):
 #         LOGGER.debug(response)
-#         assert isinstance(response, tuple)
\ No newline at end of file
+#         assert isinstance(response, tuple)
diff --git a/src/automation/Dockerfile b/src/automation/Dockerfile
index ae6c8381134b3f1192113cbe901482d650218e6a..deef6b36fb4ab324e75a687bc3ce8c297c186c8f 100644
--- a/src/automation/Dockerfile
+++ b/src/automation/Dockerfile
@@ -62,27 +62,17 @@ RUN python3 -m pip install -r requirements.txt
 
 # Add component files into working directory
 WORKDIR /var/teraflow
-COPY src/telemetry/frontend/__init__.py telemetry/frontend/__init__.py
-COPY src/telemetry/frontend/client/. telemetry/frontend/client/
-
-COPY src/analytics/frontend/client/. analytics/frontend/client/
-COPY src/analytics/frontend/service/. analytics/frontend/service/
-COPY src/analytics/database/. analytics/database/
-COPY src/analytics/frontend/__init__.py analytics/frontend/__init__.py
-
 COPY src/context/__init__.py context/__init__.py
 COPY src/context/client/. context/client/
-
-COPY src/kpi_value_api/__init__.py kpi_value_api/__init__.py
-COPY src/kpi_value_api/client/. kpi_value_api/client/
-
 COPY src/kpi_manager/__init__.py kpi_manager/__init__.py
 COPY src/kpi_manager/client/. kpi_manager/client/
-
-COPY src/monitoring/__init__.py monitoring/__init__.py
-COPY src/monitoring/client/. monitoring/client/
-
+COPY src/telemetry/__init__.py telemetry/__init__.py
+COPY src/telemetry/frontend/__init__.py telemetry/frontend/__init__.py
+COPY src/telemetry/frontend/client/. telemetry/frontend/client/
+COPY src/analytics/__init__.py analytics/__init__.py
+COPY src/analytics/frontend/__init__.py analytics/frontend/__init__.py
+COPY src/analytics/frontend/client/. analytics/frontend/client/
 COPY src/automation/. automation/
 
 # Start the service
-ENTRYPOINT ["python", "-m", "automation.service"]
\ No newline at end of file
+ENTRYPOINT ["python", "-m", "automation.service"]
diff --git a/src/automation/client/PolicyClient.py b/src/automation/client/PolicyClient.py
index 22f2aa18b520b8162b9df9ba3af3ebe1b77d5051..f2b25f2429d790c9cb25b5d70e1e737594586133 100644
--- a/src/automation/client/PolicyClient.py
+++ b/src/automation/client/PolicyClient.py
@@ -52,4 +52,4 @@ class PolicyClient:
         LOGGER.debug('AddPolicy request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.PolicyAddService(request)
         LOGGER.debug('AddPolicy result: {:s}'.format(grpc_message_to_json_string(response)))
-        return response
\ No newline at end of file
+        return response
diff --git a/src/automation/service/AutomationServiceServicerImpl.py b/src/automation/service/AutomationServiceServicerImpl.py
index fa079b7b79dbd956a77a6004248b6843a5c7cf0d..94550157e09a316f5705ce2b052a8837d75cd1b7 100644
--- a/src/automation/service/AutomationServiceServicerImpl.py
+++ b/src/automation/service/AutomationServiceServicerImpl.py
@@ -12,35 +12,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import grpc , logging, os, grpc
+import grpc, json, logging
+from uuid import uuid4
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.method_wrappers.Decorator import MetricsPool
+from common.method_wrappers.ServiceExceptions import InvalidArgumentException
+from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerId
+from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService, ZSMServiceID, ZSMServiceState, ZSMCreateUpdate
 from common.proto.automation_pb2_grpc import AutomationServiceServicer
-from common.proto.automation_pb2 import ( ZSMCreateRequest , ZSMService ,ZSMServiceID ,ZSMServiceState,ZSMCreateUpdate , ZSMServiceStateEnum)
-from common.proto.context_pb2 import ( ServiceId , ContextId , Uuid , Empty)
-from common.proto.telemetry_frontend_pb2 import ( Collector , CollectorId )
-from common.proto.policy_pb2 import ( PolicyRuleList)
-from context.client.ContextClient import ContextClient
-from automation.client.PolicyClient import PolicyClient
-from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
-from kpi_manager.client.KpiManagerClient import KpiManagerClient
-from common.proto.context_pb2 import ( Service )
-
-from common.proto.kpi_manager_pb2 import (KpiId, KpiDescriptor)
-from common.proto.kpi_value_api_pb2 import (KpiAlarms)
-
+from common.proto.context_pb2 import Service, ServiceId
+from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor
 from common.proto.policy_pb2 import PolicyRuleService, PolicyRuleState
-from common.proto.policy_action_pb2 import PolicyRuleAction , PolicyRuleActionConfig
+from common.proto.policy_action_pb2 import PolicyRuleAction, PolicyRuleActionConfig
 from common.proto.policy_condition_pb2 import PolicyRuleCondition
-from uuid import uuid4
-import json
+from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
 
-from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl
 from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient
-from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerId
-
-from kpi_value_api.client.KpiValueApiClient import KpiValueApiClient
-from common.method_wrappers.ServiceExceptions import InvalidArgumentException
+from automation.client.PolicyClient import PolicyClient
+from context.client.ContextClient import ContextClient
+from kpi_manager.client.KpiManagerClient import KpiManagerClient
+from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient
 
 LOGGER = logging.getLogger(__name__)
 METRICS_POOL = MetricsPool('Automation', 'RPC')
diff --git a/src/automation/service/__main__.py b/src/automation/service/__main__.py
index ef0f107b42e0629933c57441ba5cf4023aa2fdd7..39d8beaffd959744b83d7e1ace78da2b1b800a21 100644
--- a/src/automation/service/__main__.py
+++ b/src/automation/service/__main__.py
@@ -50,4 +50,4 @@ def main():
     return 0
 
 if __name__ == '__main__':
-    sys.exit(main())
\ No newline at end of file
+    sys.exit(main())
diff --git a/src/automation/tests/test_unitary_emulated.py b/src/automation/tests/test_unitary_emulated.py
index 24f18a2f4ffba08a608d8c89db5e05a164f561d9..dee3c4fabed496e50a7c7ee5dafa9a98be1d474d 100644
--- a/src/automation/tests/test_unitary_emulated.py
+++ b/src/automation/tests/test_unitary_emulated.py
@@ -19,4 +19,4 @@ LOGGER = logging.getLogger(__name__)
 def test_device_emulated_add_error_cases():
     LOGGER.info("Start Tests")
     LOGGER.info("Second log Tests")
-    assert True
\ No newline at end of file
+    assert True
diff --git a/src/automation/tests/test_unitary_ietf_actn.py b/src/automation/tests/test_unitary_ietf_actn.py
index 78d6b003e33288cbdfeeff660a3d2abb73fac46a..37a5ac4968c9bb39682b84b3e061ff4a35dbe0f1 100644
--- a/src/automation/tests/test_unitary_ietf_actn.py
+++ b/src/automation/tests/test_unitary_ietf_actn.py
@@ -18,4 +18,4 @@ LOGGER = logging.getLogger(__name__)
 
 def test_device_emulated_add_error_cases():
     LOGGER.info("Start Tests")
-    assert True
\ No newline at end of file
+    assert True
diff --git a/src/common/Constants.py b/src/common/Constants.py
index 7b83baa814cc09a140dbbcf52b06976c354d3bcb..4fff09ba0d5061a41fa01d6ad44228283d2fca51 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -86,7 +86,6 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.MONITORING         .value :  7070,
     ServiceNameEnum.DLT                .value :  8080,
     ServiceNameEnum.NBI                .value :  9090,
-    ServiceNameEnum.AUTOMATION         .value :  1020,
     ServiceNameEnum.L3_CAD             .value : 10001,
     ServiceNameEnum.L3_AM              .value : 10002,
     ServiceNameEnum.DBSCANSERVING      .value : 10008,
@@ -108,6 +107,7 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.TELEMETRYBACKEND   .value : 30060,
     ServiceNameEnum.ANALYTICSFRONTEND  .value : 30080,
     ServiceNameEnum.ANALYTICSBACKEND   .value : 30090,
+    ServiceNameEnum.AUTOMATION         .value : 30200,
 
     # Used for test and debugging only
     ServiceNameEnum.DLT_GATEWAY        .value : 50051,
diff --git a/src/policy/src/main/java/org/etsi/tfs/policy/policy/kafka/TopicAlarmDeserializer.java b/src/policy/src/main/java/org/etsi/tfs/policy/policy/kafka/TopicAlarmDeserializer.java
index 48945bb4468f01d99200438b7fe1f0c278f72b76..5cf476c54aef2c65f98a7d54116d01c36a0aea54 100644
--- a/src/policy/src/main/java/org/etsi/tfs/policy/policy/kafka/TopicAlarmDeserializer.java
+++ b/src/policy/src/main/java/org/etsi/tfs/policy/policy/kafka/TopicAlarmDeserializer.java
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.etsi.tfs.policy.policy.kafka;
 
 import io.quarkus.kafka.client.serialization.ObjectMapperDeserializer;
diff --git a/src/telemetry/frontend/service/__main__.py b/src/telemetry/frontend/service/__main__.py
index 58c622fc3da5906ba557cff829bb15965c164b34..6697ff5f10e58b494736738e631a29a20691732d 100644
--- a/src/telemetry/frontend/service/__main__.py
+++ b/src/telemetry/frontend/service/__main__.py
@@ -44,8 +44,8 @@ def main():
     kpiDBobj.create_tables()
 
     # Start metrics server
-    # metrics_port = get_metrics_port()
-    # start_http_server(metrics_port)
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
 
     grpc_service = TelemetryFrontendService()
     grpc_service.start()
diff --git a/update_tfs_runtime_env_vars.sh b/update_tfs_runtime_env_vars.sh
index 63a692c9f62c361c2461adc16c11e59d7fee1b19..209551c03a85909b5f02b70f22704675a2f64df5 100755
--- a/update_tfs_runtime_env_vars.sh
+++ b/update_tfs_runtime_env_vars.sh
@@ -20,7 +20,7 @@
 
 # If not already set, set the list of components you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service nbi webui automation"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device monitoring service nbi webui"}
 
 # If not already set, set the name of the Kubernetes namespace to deploy to.
 export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
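
A minimal usage sketch (not part of the patch) for the new SparkStreamer entry point added in src/analytics/backend/service/SparkStreaming.py. It assumes the Kafka broker returned by KafkaConfig.get_kafka_address() is reachable and the VALUE topic already holds at least one message, as the function's docstring requires; the analyzer key, KPI UUID, operation list, threshold pairs, and window settings below are illustrative values only.

# Illustrative only: drives SparkStreamer from a worker thread and stops it via the
# stop_event flag, which the function polls every 5 seconds. Only the call signature
# comes from the patch; all literal values are hypothetical.
import threading
from analytics.backend.service.SparkStreaming import SparkStreamer

stop_event = threading.Event()
worker = threading.Thread(
    target=SparkStreamer,
    args=(
        "analyzer-1",                                  # key (illustrative)
        ["6e22f180-ba28-4641-b190-2287bf448888"],      # kpi_list: KPI UUIDs to keep
        ["avg", "max"],                                # oper_list: aggregations to compute
        {"avg_value": (10.0, 90.0)},                   # thresholds: column -> (fail_th, raise_th)
        stop_event,
    ),
    kwargs={"window_size": "60 seconds", "win_slide_duration": "30 seconds"},
)
worker.start()
# ... later, request shutdown; the streaming query is stopped and the loop exits
stop_event.set()
worker.join()

Note that the threshold keys must name columns produced by GetAggregations for the selected operations (avg_value, max_value, and so on), otherwise ApplyThresholds has nothing to compare against.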