Loading proto/automation.proto +3 −10 Original line number Diff line number Diff line Loading @@ -37,10 +37,6 @@ enum ZSMServiceStateEnum { ZSM_REMOVED = 5; // ZSM loop is removed } enum ZSMTypeEnum { ZSMTYPE_UNKNOWN = 0; } message ZSMCreateRequest { context.ServiceId target_service_id = 1; context.ServiceId telemetry_service_id = 2; Loading @@ -62,9 +58,6 @@ message ZSMServiceState { // Basic ZSM service attributes message ZSMService { ZSMServiceID zsmServiceId = 1; context.ServiceId serviceId = 2; policy.PolicyRuleList policyList = 3; // TODO: When new Analytics and updated Monitoring are in place, add the necessary binding to them } src/automation/README.md 0 → 100644 +116 −0 Original line number Diff line number Diff line # ETSI TFS Automation Service The Automation service exposes a gPRC API with the following methods: - ZSMCreate - ZSMDelete - ZSMGetById - ZSMGetByService To Invoke this API follow the steps below: ## Install grpcurl ```bash curl -sSL "https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz" | sudo tar -xz -C /usr/local/bin ``` ## Fetch available methods around Automation ```bash cd ~/tfs-ctrl/ grpcurl -import-path ./proto -proto automation.proto list grpcurl -import-path ./proto -proto automation.proto describe automation.AutomationService grpcurl -import-path ./proto -proto automation.proto describe automation.AutomationService.ZSMCreate ``` ## Try an example ZSMCreate TFS tests are now augmented with a new Automation example that shows the way to trigger a ZSM loop creaiton. The following script invokes Automation on top of a specific example topology. 
```bash bash src/tests/automation/run_test_automation.sh ``` More details are provided in `src/tests/automation/README.md` ## Important Services WebUI ``` http://10.10.10.41/webui/ ``` Grafana ``` http://10.10.10.41/grafana ``` Prometheus ``` http://10.10.10.41:30090/ ``` ## Check Kafka topics The following commands may help you debug a closed loop that involves Telemetry, Analytics, Policy, all managed by Automation. Kafka is a key element for Automation as the KPIs managed by Analyzer create alarms that propagate to Policy via a dedicated Kafka topic. Checking this topic is key for ensuring proper communication between Analytics and Policy. Get all pods in Kafka namespace: ```bash kubectl get pods -n kafka NAME READY STATUS RESTARTS AGE kafka-0 1/1 Running 2 (90d ago) 104d kafka-broker-5f9656cc68-d8fzz 1/1 Running 13 (44h ago) 429d zookeeper-8664c6774d-nvbgg 1/1 Running 6 (90d ago) 429d ``` Query Kafka from within the Kafka broker's pod: ```bash kubectl exec -it kafka-0 -n kafka -- \ kafka-consumer-groups.sh \ --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 \ --list Output> backend KpiValueWriter analytics-backend policy ``` Now let's see the consumer groups. 
```bash kubectl exec -it kafka-0 -n kafka -- \ kafka-consumer-groups.sh \ --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 \ --list Output> backend KpiValueWriter analytics-backend policy ``` Let's describe the policy group: ```bash kubectl exec -it kafka-0 -n kafka -- \ kafka-consumer-groups.sh \ --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 \ --describe \ --group policy Output> GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID policy topic_alarms 0 40 41 1 kafka-consumer-topic_alarms-c8f09ac5-8c44-42ec-a13b-1af2c47fb86c /10.1.181.228 kafka-consumer-topic_alarms ``` src/automation/client/AutomationClient.py 0 → 100644 +54 −0 Original line number Diff line number Diff line # Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import grpc, logging from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc from common.proto.automation_pb2_grpc import AutomationServiceStub from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService, ZSMServiceID, ZSMServiceState from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect') class AutomationClient: def __init__(self, host=None, port=None): if not host: host = get_service_host(ServiceNameEnum.AUTOMATION) if not port: port = get_service_port_grpc(ServiceNameEnum.AUTOMATION) self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) LOGGER.info('Creating channel to {:s}...'.format(str(self.endpoint))) self.channel = None self.stub = None self.openconfig_stub = None self.connect() LOGGER.info('Channel created') def connect(self): self.channel = grpc.insecure_channel(self.endpoint) self.stub = AutomationServiceStub(self.channel) def close(self): if self.channel is not None: self.channel.close() self.channel = None self.stub = None @RETRY_DECORATOR def ZSMCreate(self, request : ZSMCreateRequest) -> ZSMService: # type: ignore LOGGER.info('ZSMCreate request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.ZSMCreate(request) LOGGER.info('ZSMCreate result: {:s}'.format(grpc_message_to_json_string(response))) return response src/automation/client/PolicyClient.py +4 −4 Original line number Diff line number Diff line Loading @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import grpc, logging import grpc, logging, uuid from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc from common.proto.policy_pb2 import PolicyRuleService, PolicyRuleState Loading Loading @@ -47,8 +47,8 @@ class PolicyClient: self.stub = None @RETRY_DECORATOR def PolicyAddService(self, request : PolicyRuleService) -> PolicyRuleState: LOGGER.debug('AddPolicy request: {:s}'.format(grpc_message_to_json_string(request))) def PolicyAddService(self, request : PolicyRuleService) -> PolicyRuleState: # type: ignore LOGGER.info('AddPolicy request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.PolicyAddService(request) LOGGER.debug('AddPolicy result: {:s}'.format(grpc_message_to_json_string(response))) LOGGER.info('AddPolicy result: {:s}'.format(grpc_message_to_json_string(response))) return response src/automation/service/AutomationServiceServicerImpl.py +1 −1 Original line number Diff line number Diff line Loading @@ -38,7 +38,7 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL,LOGGER) def ZSMCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext) -> ZSMService: LOGGER.info("Received gRPC message object: {:}".format(request)) LOGGER.info("Received gRPC message object:\n{:}".format(request)) context_client = ContextClient() targetService = context_client.GetService(request.target_service_id) Loading Loading
proto/automation.proto +3 −10 Original line number Diff line number Diff line Loading @@ -37,10 +37,6 @@ enum ZSMServiceStateEnum { ZSM_REMOVED = 5; // ZSM loop is removed } enum ZSMTypeEnum { ZSMTYPE_UNKNOWN = 0; } message ZSMCreateRequest { context.ServiceId target_service_id = 1; context.ServiceId telemetry_service_id = 2; Loading @@ -62,9 +58,6 @@ message ZSMServiceState { // Basic ZSM service attributes message ZSMService { ZSMServiceID zsmServiceId = 1; context.ServiceId serviceId = 2; policy.PolicyRuleList policyList = 3; // TODO: When new Analytics and updated Monitoring are in place, add the necessary binding to them }
src/automation/README.md 0 → 100644 +116 −0 Original line number Diff line number Diff line # ETSI TFS Automation Service The Automation service exposes a gPRC API with the following methods: - ZSMCreate - ZSMDelete - ZSMGetById - ZSMGetByService To Invoke this API follow the steps below: ## Install grpcurl ```bash curl -sSL "https://github.com/fullstorydev/grpcurl/releases/download/v1.8.7/grpcurl_1.8.7_linux_x86_64.tar.gz" | sudo tar -xz -C /usr/local/bin ``` ## Fetch available methods around Automation ```bash cd ~/tfs-ctrl/ grpcurl -import-path ./proto -proto automation.proto list grpcurl -import-path ./proto -proto automation.proto describe automation.AutomationService grpcurl -import-path ./proto -proto automation.proto describe automation.AutomationService.ZSMCreate ``` ## Try an example ZSMCreate TFS tests are now augmented with a new Automation example that shows the way to trigger a ZSM loop creaiton. The following script invokes Automation on top of a specific example topology. ```bash bash src/tests/automation/run_test_automation.sh ``` More details are provided in `src/tests/automation/README.md` ## Important Services WebUI ``` http://10.10.10.41/webui/ ``` Grafana ``` http://10.10.10.41/grafana ``` Prometheus ``` http://10.10.10.41:30090/ ``` ## Check Kafka topics The following commands may help you debug a closed loop that involves Telemetry, Analytics, Policy, all managed by Automation. Kafka is a key element for Automation as the KPIs managed by Analyzer create alarms that propagate to Policy via a dedicated Kafka topic. Checking this topic is key for ensuring proper communication between Analytics and Policy. 
Get all pods in Kafka namespace: ```bash kubectl get pods -n kafka NAME READY STATUS RESTARTS AGE kafka-0 1/1 Running 2 (90d ago) 104d kafka-broker-5f9656cc68-d8fzz 1/1 Running 13 (44h ago) 429d zookeeper-8664c6774d-nvbgg 1/1 Running 6 (90d ago) 429d ``` Query Kafka from within the Kafka broker's pod: ```bash kubectl exec -it kafka-0 -n kafka -- \ kafka-consumer-groups.sh \ --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 \ --list Output> backend KpiValueWriter analytics-backend policy ``` Now let's see the consumer groups. ```bash kubectl exec -it kafka-0 -n kafka -- \ kafka-consumer-groups.sh \ --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 \ --list Output> backend KpiValueWriter analytics-backend policy ``` Let's describe the policy group: ```bash kubectl exec -it kafka-0 -n kafka -- \ kafka-consumer-groups.sh \ --bootstrap-server kafka-service.kafka.svc.cluster.local:9092 \ --describe \ --group policy Output> GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID policy topic_alarms 0 40 41 1 kafka-consumer-topic_alarms-c8f09ac5-8c44-42ec-a13b-1af2c47fb86c /10.1.181.228 kafka-consumer-topic_alarms ```
# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import grpc, logging
from common.Constants import ServiceNameEnum
from common.Settings import get_service_host, get_service_port_grpc
from common.proto.automation_pb2_grpc import AutomationServiceStub
from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService, ZSMServiceID, ZSMServiceState
from common.tools.client.RetryDecorator import retry, delay_exponential
from common.tools.grpc.Tools import grpc_message_to_json_string

LOGGER = logging.getLogger(__name__)

# Retry policy: up to 15 attempts with exponential back-off (10 ms start,
# x2 growth, 5 s cap); 'connect' is re-invoked before each retry so a
# dropped channel is re-established automatically.
MAX_RETRIES = 15
DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')

class AutomationClient:
    """gRPC client for the Automation (ZSM) service.

    Opens an insecure channel to the Automation service endpoint and exposes
    its RPCs. Host/port default to the values registered for
    ServiceNameEnum.AUTOMATION in the common Settings.
    """

    def __init__(self, host=None, port=None):
        """Create the client and connect immediately.

        :param host: Automation service host; resolved from Settings when None.
        :param port: Automation service gRPC port; resolved from Settings when None.
        """
        if not host: host = get_service_host(ServiceNameEnum.AUTOMATION)
        if not port: port = get_service_port_grpc(ServiceNameEnum.AUTOMATION)
        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
        LOGGER.info('Creating channel to {:s}...'.format(str(self.endpoint)))
        self.channel = None
        self.stub = None
        # NOTE(review): 'openconfig_stub' is never assigned a stub nor read in
        # this file; it looks like a copy-paste leftover from a device-driver
        # client. Kept for attribute compatibility — confirm and remove.
        self.openconfig_stub = None
        self.connect()
        LOGGER.info('Channel created')

    def connect(self):
        """(Re)create the insecure channel and the Automation service stub."""
        self.channel = grpc.insecure_channel(self.endpoint)
        self.stub = AutomationServiceStub(self.channel)

    def close(self):
        """Close the channel (if open) and drop the stub references."""
        if self.channel is not None: self.channel.close()
        self.channel = None
        self.stub = None

    @RETRY_DECORATOR
    def ZSMCreate(self, request : ZSMCreateRequest) -> ZSMService: # type: ignore
        """Create a ZSM closed loop for the services referenced in 'request'.

        :param request: ZSMCreateRequest carrying target/telemetry service ids.
        :return: the ZSMService created by the Automation service.
        """
        LOGGER.info('ZSMCreate request: {:s}'.format(grpc_message_to_json_string(request)))
        response = self.stub.ZSMCreate(request)
        LOGGER.info('ZSMCreate result: {:s}'.format(grpc_message_to_json_string(response)))
        return response
src/automation/client/PolicyClient.py +4 −4 Original line number Diff line number Diff line Loading @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import grpc, logging import grpc, logging, uuid from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc from common.proto.policy_pb2 import PolicyRuleService, PolicyRuleState Loading Loading @@ -47,8 +47,8 @@ class PolicyClient: self.stub = None @RETRY_DECORATOR def PolicyAddService(self, request : PolicyRuleService) -> PolicyRuleState: LOGGER.debug('AddPolicy request: {:s}'.format(grpc_message_to_json_string(request))) def PolicyAddService(self, request : PolicyRuleService) -> PolicyRuleState: # type: ignore LOGGER.info('AddPolicy request: {:s}'.format(grpc_message_to_json_string(request))) response = self.stub.PolicyAddService(request) LOGGER.debug('AddPolicy result: {:s}'.format(grpc_message_to_json_string(response))) LOGGER.info('AddPolicy result: {:s}'.format(grpc_message_to_json_string(response))) return response
src/automation/service/AutomationServiceServicerImpl.py +1 −1 Original line number Diff line number Diff line Loading @@ -38,7 +38,7 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL,LOGGER) def ZSMCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext) -> ZSMService: LOGGER.info("Received gRPC message object: {:}".format(request)) LOGGER.info("Received gRPC message object:\n{:}".format(request)) context_client = ContextClient() targetService = context_client.GetService(request.target_service_id) Loading