From d352735669b21101b09e744365b1687d5b4a6ff4 Mon Sep 17 00:00:00 2001 From: TFS Date: Tue, 26 Mar 2024 23:32:35 +0100 Subject: [PATCH 01/41] new test script --- src/tests/ofc24/deploy-node-agents.sh | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/tests/ofc24/deploy-node-agents.sh b/src/tests/ofc24/deploy-node-agents.sh index 5c3c8d0d2..79d6c1dc3 100755 --- a/src/tests/ofc24/deploy-node-agents.sh +++ b/src/tests/ofc24/deploy-node-agents.sh @@ -34,21 +34,18 @@ echo "Create Management Network and Node Agents:" echo "------------------------------------------" docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 --ip-range=172.254.253.0/24 na-br docker run -d --name na-t1 --network=na-br --ip 172.254.253.1 \ - --volume "$PWD/src/tests/${TEST_NAME}/startNetconfAgent.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/platform_t1.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-hhi.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + asgamb1/flexscale-hhi.img:latest ./startNetconfAgent.sh docker run -d --name na-t2 --network=na-br --ip 172.254.253.2 \ - --volume "$PWD/src/tests/${TEST_NAME}/startNetconfAgent.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/platform_t2.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-hhi.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + asgamb1/flexscale-hhi.img:latest ./startNetconfAgent.sh docker run -d --name na-r1 --network=na-br --ip 172.254.253.101 \ - --volume "$PWD/src/tests/${TEST_NAME}/startNetconfAgent.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/platform_r1.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + asgamb1/flexscale-node.img:latest 
./startNetconfAgent.sh + docker run -d --name na-r2 --network=na-br --ip 172.254.253.102 \ - --volume "$PWD/src/tests/${TEST_NAME}/startNetconfAgent.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/platform_r2.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + asgamb1/flexscale-node.img:latest ./startNetconfAgent.sh echo -- GitLab From 830ce1265d2787cbe97fab7e17714671fa7fd053 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Thu, 23 Oct 2025 23:05:28 +0200 Subject: [PATCH 02/41] polimi flexgrid working+ preliminary band protection --- deploy/all.sh | 2 +- src/opticalcontroller/OpticalController.py | 31 +- src/opticalcontroller/RSA.py | 185 ++++++++++-- .../service/ServiceServiceServicerImpl.py | 283 +++++++++++++----- .../service/task_scheduler/TaskScheduler.py | 193 ++++++++++-- src/service/service/tools/OpticalTools.py | 155 +++++++++- src/service/tests/test_recon.py | 101 +++++++ 7 files changed, 815 insertions(+), 135 deletions(-) create mode 100644 src/service/tests/test_recon.py diff --git a/deploy/all.sh b/deploy/all.sh index 764ec7dbc..25f8de7b1 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -213,7 +213,7 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} ./deploy/qdb.sh # Deploy Apache Kafka -#./deploy/kafka.sh +./deploy/kafka.sh #Deploy Monitoring (Prometheus, Mimir, Grafana) #./deploy/monitoring.sh diff --git a/src/opticalcontroller/OpticalController.py b/src/opticalcontroller/OpticalController.py index e859e3ba1..075c8a42b 100644 --- a/src/opticalcontroller/OpticalController.py +++ b/src/opticalcontroller/OpticalController.py @@ -80,10 +80,10 @@ class AddFlexLightpath(Resource): @staticmethod def put(src, dst, bitrate, bidir=1, band=None): - print("INFO: New FlexLightpath request from {} to {} with rate {} ".format(src, dst, bitrate)) + print("INFO: New FlexLightpath request from {} to {} with rate {} and 
band {}".format(src, dst, bitrate, band)) t0 = time.time()*1000.0 - if debug: - rsa.g.printGraph() + #if debug: + # rsa.g.printGraph() if rsa is not None: flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band) @@ -106,6 +106,7 @@ class AddFlexLightpath(Resource): return rsa.optical_bands[optical_band_id], 200 else: return "Error", 404 + # @optical.route('/DelFlexLightpath////') @optical.route('/DelFlexLightpath////') @optical.route('/DelFlexLightpath/////') @@ -280,7 +281,7 @@ class DelLightpath(Resource): match1 = flow["src"] == src and flow["dst"] == dst and flow["bitrate"] == bitrate match2 = flow["src"] == dst and flow["dst"] == src and flow["bitrate"] == bitrate if match1 or match2: - rsa.del_flow(flow) + rsa.del_flow(flow, flow_id) rsa.db_flows[flow_id]["is_active"] = False if debug: print(rsa.links_dict) @@ -331,6 +332,28 @@ class GetBand(Resource): return rsa.optical_bands[ob_idx], 200 return {}, 404 +@optical.route('/ReconfigFlexLightpath/') +@optical.response(200, 'Success') +@optical.response(404, 'Error, not found') +class ReconfigFlexLightpath(Resource): + @staticmethod + def put(flow_id_val): + print("INFO: Reconfiguring optical {}".format(flow_id_val)) + t0 = time.time()*1000.0 + if rsa is not None: + flow_idx, optical_band_id = rsa.rsa_fs_recomputation(flow_id_val) + if flow_idx is not None: + if rsa.db_flows[flow_idx]["op-mode"] == 0: + return 'No path found', 404 + t1 = time.time() * 1000.0 + elapsed = t1 - t0 + print("INFO: time elapsed = {} ms".format(elapsed)) + print(flow_idx, optical_band_id) + return rsa.db_flows[flow_idx], 200 + else: + return "Error", 404 + else: + return "Error", 404 @optical.route('/GetLinks') @optical.response(200, 'Success') diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 95a49113e..861afeb01 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -17,6 +17,8 @@ from opticalcontroller.dijkstra import * from opticalcontroller.tools import 
* from opticalcontroller.variables import * debug = 1 + + ''' LOGGER = logging.getLogger(__name__) @@ -256,14 +258,14 @@ class RSA(): if "l_slots" in self.optical_bands[optical_band_id].keys(): if len(self.optical_bands[optical_band_id]["l_slots"]) > 0: a_l = l_sts - b_l = consecutives(self.optical_bands[optical_band_id]["l_slots"], val_c) + b_l = consecutives(self.optical_bands[optical_band_id]["l_slots"], val_l) l_sts = common_slots(a_l, b_l) else: l_sts = [] if "s_slots" in self.optical_bands[optical_band_id].keys(): if len(self.optical_bands[optical_band_id]["s_slots"]) > 0: a_s = s_sts - b_s = consecutives(str_list_to_int(self.optical_bands[optical_band_id]["s_slots"].keys()), val_c) + b_s = consecutives(self.optical_bands[optical_band_id]["s_slots"], val_s) s_sts = common_slots(a_s, b_s) else: s_sts = [] @@ -480,8 +482,7 @@ class RSA(): print(f"delete band del_band") self.del_band(flow,flow_id,o_b_id=o_b_id) else : - self.del_flow(flow,flow_id=flow_id,o_b_id=o_b_id) - + self.del_flow(flow,flow_id=flow_id,o_b_id=o_b_id) def get_fibers_forward(self, links, slots, band): @@ -652,16 +653,17 @@ class RSA(): print(self.links_dict) band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number) if band is None: - print("No slots available in the three bands") + print("ERROR: No slots available in the three bands") return None, None, None, None, None if debug: - print(band, slots) + print(f"INFO: XXXX {band}, {slots}") self.get_fibers_forward(links, slots, band) if bidir: self.get_fibers_backward(links, slots, band) #fibers_f = self.get_fibers_forward(links, slots, band) self.update_optical_band(o_band_id, slots, band) + print("INFO: 1") #fibers_b = [] #if bidir: # fibers_b = self.get_fibers_backward(links, fibers_f, slots, band) @@ -690,7 +692,8 @@ class RSA(): #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"] r_inport = lx["local_peer_port"] t_flows[src]["b"] = {"in": r_inport, "out": port_0} - + 
print("INFO: 2") + #R1 rules t_flows[dst] = {} t_flows[dst]["f"] = {} @@ -737,12 +740,13 @@ class RSA(): #r_inport = self.links_dict[drop]['fibers'][f]["remote_peer_port"] r_inport = ly["remote_peer_port"] t_flows[dst]["b"] = {"in": port_0, "out": r_inport} - - if debug: - print(self.links_dict) - - if debug: - print(t_flows) + print("INFO: 3") + + #if debug: + # print(self.links_dict) + print("INFO: 4") + #if debug: + # print(t_flows) print("INFO: Flow matrix computed for Flex Lightpath") return t_flows, band, slots, {}, {} @@ -832,8 +836,8 @@ class RSA(): self.optical_bands[ob_id]["s_slots"] = [] self.optical_bands[ob_id]["served_lightpaths"] = [] self.optical_bands[ob_id]["reverse_optical_band_id"] = 0 - self.db_flows[self.flow_id]["parent_opt_band"] = 0 - self.db_flows[self.flow_id]["new_optical_band"] = 0 + #self.db_flows[flow_id]["parent_opt_band"] = 0 + #self.db_flows[flow_id]["new_optical_band"] = 0 def create_optical_band(self, links, path, bidir, num_slots): print("INFO: Creating optical-band of {} slots".format(num_slots)) @@ -899,7 +903,7 @@ class RSA(): print(f0, band) print("INFO: RSA completed for optical band") if flow_list is None: - self.null_values(self.flow_id) + self.null_values_ob(self.opt_band_id) return self.flow_id, [] #slots_i = [] #for i in slots: @@ -988,16 +992,17 @@ class RSA(): return result def rsa_fs_computation(self, src, dst, rate, bidir, band): - num_slots_ob = "full_band" if band is not None: num_slots_ob = map_band_to_slot(band) print(band, num_slots_ob) + else: + num_slots_ob = "full_band" if self.nodes_dict[src]["type"] == "OC-ROADM" and self.nodes_dict[dst]["type"] == "OC-ROADM": print("INFO: ROADM to ROADM connection") links, path = self.compute_path(src, dst) if len(path) < 1: self.null_values_ob(self.opt_band_id) - return self.flow_id, [] + return self.opt_band_id, [] optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob) return None, optical_band_id print("INFO: TP to TP connection") @@ 
-1008,6 +1013,9 @@ class RSA(): self.flow_id += 2 else: self.flow_id += 1 + if band is not None: + num_slots_ob = map_band_to_slot(band) + print(band, num_slots_ob) self.db_flows[self.flow_id] = {} self.db_flows[self.flow_id]["flow_id"] = self.flow_id self.db_flows[self.flow_id]["src"] = src @@ -1086,7 +1094,7 @@ class RSA(): ''' if bidir: rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] - self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id) + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) ''' return self.flow_id, ob_id else: @@ -1147,13 +1155,13 @@ class RSA(): self.db_flows[self.flow_id]["freq"] = f0 self.db_flows[self.flow_id]["is_active"] = True self.db_flows[self.flow_id]["parent_opt_band"] = ob_id - #self.db_flows[self.flow_id]["new_optical_band"] = 1 + #self.db_flows[flow_id]["new_optical_band"] = 1 self.db_flows[self.flow_id]["new_optical_band"] = 2 self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id) ''' if bidir: rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] - self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id) + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) ''' return self.flow_id, ob_id else: @@ -1184,7 +1192,7 @@ class RSA(): print("INFO: RSA completed for FLex Lightpath with new OB") if flow_list is None: self.null_values(self.flow_id) - return self.flow_id, optical_band_id + return flow_id, optical_band_id slots_i = [] for i in slots: slots_i.append(int(i)) @@ -1207,11 +1215,142 @@ class RSA(): ''' if bidir: rev_ob_id = self.optical_bands[optical_band_id]["reverse_optical_band_id"] - self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id) + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) ''' return self.flow_id, optical_band_id + def move_flow(self, flow_id, slots, band, links, bidir, o_b_id = None): + for l in links: + link = self.get_link_by_name(l) + fib = link["optical_details"] + 
#self.restore_link(fib, slots, band) + self.restore_link_2(fib, slots, band, link=link) + if o_b_id is not None: + if debug: + print("restoring OB") + print(f"invoking restore_optical_band o_b_id: {o_b_id} , slots {slots} , band {band} ") + self.restore_optical_band(o_b_id, slots, band) + if flow_id in self.optical_bands[o_b_id]["served_lightpaths"]: + if flow_id in self.optical_bands[o_b_id]["served_lightpaths"]: + self.optical_bands[o_b_id]["served_lightpaths"].remove(flow_id) + + #self.restore_optical_band_2(o_b_id, slots, band,links) + if bidir: + for l in links: + r_l = reverse_link(l) + if debug: + print(r_l) + rlink = self.get_link_by_name(r_l) + fib = rlink["optical_details"] + #fib = self.get_link_by_name(r_l)["optical_details"] + if list_in_list(slots, str_list_to_int(fib[band].keys())): + #self.restore_link(fib, slots, band, link=l) + self.restore_link_2(fib, slots, band, link=rlink) + if debug: + print(fib[band]) + + return True + + def rsa_fs_recomputation(self, flow_idy): + flow_idx = int(flow_idy) + print(f"INFO: Reconifguring connection {flow_idx}") + if flow_idx not in self.db_flows.keys(): + print(f"ERROR: key not present {flow_idx}") + else: + print(self.db_flows[flow_idx]) + #self.db_flows[flow_idx] = {} + src = self.db_flows[flow_idx]["src"] + dst = self.db_flows[flow_idx]["dst"] + rate = self.db_flows[flow_idx]["bitrate"] + bidir = self.db_flows[flow_idx]["bidir"] + flow_list = self.db_flows[flow_idx]["flows"] + band_type = self.db_flows[flow_idx]["band_type"] + slots_init = self.db_flows[flow_idx]["slots"] + fiber_f = self.db_flows[flow_idx]["fiber_forward"] + fiber_b = self.db_flows[flow_idx]["fiber_backward"] + op = self.db_flows[flow_idx]["op-mode"] + num_slots = self.db_flows[flow_idx]["n_slots"] + links = self.db_flows[flow_idx]["links"] + path = self.db_flows[flow_idx]["path"] + band = self.db_flows[flow_idx]["band"] + f0 = self.db_flows[flow_idx]["freq"] + ob_idx = self.db_flows[flow_idx]["parent_opt_band"] + + r1 = "" + r2 = "" + if 
len(links) == 2: + [t1, r1] = links[0].split("-") + [r2, t2] = links[1].split("-") + else: + return 0, 0 + existing_ob = self.get_optical_bands(r1, r2) + if len(existing_ob) > 0: + print("INFO: Trying to move connection to an existing OB") + #first checking in existing OB + for ob_id in existing_ob: + if "is_active" in self.optical_bands[ob_id].keys(): + is_active = self.optical_bands[ob_id]["is_active"] + if not is_active: + continue + op, num_slots = map_rate_to_slot(rate) + if debug: + print(links) + + c_slots, l_slots, s_slots = self.get_slots(links, num_slots, ob_id) + if debug: + print(c_slots) + print(l_slots) + print(s_slots) + if band_type == "c_slots": + c_slots = [] + elif band_type == "l_slots": + l_slots = [] + elif band_type == "s_slots": + s_slots = [] + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs(links, num_slots, + c_slots, + l_slots, s_slots, bidir, + ob_id) + + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Flex Lightpath with OB already in place") + if flow_list is None: + continue + slots_i = [] + for i in slots: + slots_i.append(int(i)) + self.db_flows[flow_idx]["flows"] = flow_list + self.db_flows[flow_idx]["band_type"] = band_range + self.db_flows[flow_idx]["slots"] = slots_i + self.db_flows[flow_idx]["fiber_forward"] = fiber_f + self.db_flows[flow_idx]["fiber_backward"] = fiber_b + self.db_flows[flow_idx]["op-mode"] = op + self.db_flows[flow_idx]["n_slots"] = num_slots + #self.db_flows[flow_idx]["links"] = temp_links2 + #self.db_flows[flow_idx]["path"] = temp_path + self.db_flows[flow_idx]["band"] = band + self.db_flows[flow_idx]["freq"] = f0 + self.db_flows[flow_idx]["is_active"] = True + self.db_flows[flow_idx]["parent_opt_band"] = ob_id + self.db_flows[flow_idx]["new_optical_band"] = 0 + 
self.optical_bands[ob_id]["served_lightpaths"].append(flow_idx) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + self.move_flow(flow_idx, slots_init, band_type, links, bidir, ob_idx) + return flow_idx, ob_id + else: + continue + print("not enough slots") + return None, 0 + + def extend_optical_band(self, ob_id, band=None): ob = self.optical_bands[ob_id] links = ob["links"] diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 48cb93c6b..eeb2cd4a6 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -44,8 +44,8 @@ from .task_scheduler.TaskScheduler import TasksScheduler from .tools.GeodesicDistance import gps_distance from .tools.OpticalTools import ( add_flex_lightpath, add_lightpath, delete_lightpath, adapt_reply, get_device_name_from_uuid, - get_optical_band, refresh_opticalcontroller, DelFlexLightpath , extend_optical_band - + get_optical_band, refresh_opticalcontroller, DelFlexLightpath , extend_optical_band, + reconfig_flex_lightpath, adapt_reply_ob ) @@ -276,7 +276,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ports = [] for endpoint_id in service.service_endpoint_ids: endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - if "." in endpoint_device_uuid: + if "." 
or "MGON" in endpoint_device_uuid: endpoint_device_name = endpoint_device_uuid else: endpoint_device_name = device_names[endpoint_device_uuid] @@ -310,10 +310,24 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): else: reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band) logging.info(f"TEEEEEEEEEEEEEEST {oc_type}") + logging.info(f"POLIMI {reply_txt}") + if reply_txt == None: + return service_with_uuids.service_id reply_json = json.loads(reply_txt) LOGGER.info('[optical] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) optical_band_txt = "" + if "optical_band_id" in reply_json.keys(): + optical_band_txt = reply_txt + + optical_reply = adapt_reply_ob( + devices, _service, reply_json, context_uuid_x, topology_uuid_x, optical_band_txt + ) + + tasks_scheduler.compose_from_opticalcontroller_reply( + optical_reply, is_delete=False) + tasks_scheduler.execute_all() + return service_with_uuids.service_id if "new_optical_band" in reply_json.keys(): if reply_json["new_optical_band"] == 1: if reply_json["parent_opt_band"]: @@ -452,6 +466,27 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) tasks_scheduler.execute_all() return Empty() + elif oc_type ==2 : + if len(service.service_config.config_rules) > 0: + c_rules_dict = json.loads( + service.service_config.config_rules[0].custom.resource_value) + ob_id=None + flow_id=None + + if ("flow_id" in c_rules_dict): + flow_id = c_rules_dict["flow_id"] + #if ("ob_id" in c_rules_dict): + # ob_id = c_rules_dict["ob_id"] + params['bitrate']=bitrate + params['dst']=dst + params['src']=src + params['flow_id']=flow_id + params['bidir'] = bidir + tasks_scheduler = TasksScheduler(self.service_handler_factory) + tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) + tasks_scheduler.execute_all() + return Empty() + # Normal service # Feed TaskScheduler with this service and 
the sub-services and sub-connections related to this service. @@ -514,9 +549,11 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): updated_service_with_uuids = get_service_by_id( context_client, updated_service_id_with_uuids, rw_copy=True, include_config_rules=True, include_constraints=True, include_endpoint_ids=True) + LOGGER.info('WYY:{}'.format(updated_service_with_uuids)) # Get active connection connections = context_client.ListConnections(updated_service_id_with_uuids) + LOGGER.info('WWWW:{}'.format(connections)) if len(connections.connections) == 0: MSG = 'Service({:s}) has no connections' str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) @@ -538,83 +575,173 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): str_connection = grpc_message_to_json_string(old_connection) str_extra_details = MSG.format(str_service_id, str_connection_id, str_connection) raise NotImplementedException('service-connection-with-subservices', extra_details=str_extra_details) + + if updated_service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + LOGGER.info('WWWW:{}'.format("is optical")) + context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) + topology_id_x = json_topology_id( + DEFAULT_TOPOLOGY_NAME, context_id_x) + topology_details = context_client.GetTopologyDetails( + TopologyId(**topology_id_x)) + + str_old_connection = connection_to_string(old_connection) + LOGGER.info('WWW old_connection={}'.format(grpc_message_to_json_string(old_connection))) + LOGGER.info('WWW0={}'.format(updated_service_with_uuids.service_config.config_rules)) + if len(updated_service_with_uuids.service_config.config_rules)> 0: + #if len(updated_service.service_config.config_rules) > 0: + c_rules_dict = json.loads( + updated_service_with_uuids.service_config.config_rules[0].custom.resource_value) + + #c_rules_dict = json.loads( + # updated_service.service_config.config_rules[0].custom.resource_value) + 
LOGGER.info('WWW1:{}'.format(c_rules_dict)) + flow_id=None + #if "ob_id" in c_rules_dict: + # ob_id = c_rules_dict["ob_id"] + if ("flow_id" in c_rules_dict): + flow_id = c_rules_dict["flow_id"] + reply_txt = "" + # to get the reply form the optical module + #multi-granular + reply_txt = reconfig_flex_lightpath(flow_id) + reply_json = json.loads(reply_txt) + LOGGER.info('[optical] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) + devices = topology_details.devices + context_uuid_x = topology_details.topology_id.context_id.context_uuid.uuid + topology_uuid_x = topology_details.topology_id.topology_uuid.uuid + + device_names : Dict[str, str] = dict() + for device in devices: + device_uuid = device.device_id.device_uuid.uuid + device_names[device_uuid] = device.name + + if reply_txt is not "": + optical_reply = adapt_reply(devices, updated_service, reply_json, context_uuid_x, topology_uuid_x, "") + new_connection = optical_reply.connections[0] + #for candidate_new_connection in pathcomp_reply.connections: + str_candidate_new_connection = connection_to_string(new_connection) + # Change UUID of new connection to prevent collisions + tmp_connection = Connection() + tmp_connection.CopyFrom(new_connection) + tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) + new_connection = tmp_connection + service_new = optical_reply.services[0] + LOGGER.info('QQQQ:{}'.format(service_new)) + + ''' + if len(service.service_config.config_rules) > 0: + c_rules_dict = json.loads( + service.service_config.config_rules[0].custom.resource_value) + ob_id=None + flow_id=None + if "ob_id" in c_rules_dict: + ob_id = c_rules_dict["ob_id"] + if ("flow_id" in c_rules_dict): + flow_id = c_rules_dict["flow_id"] + #if ("ob_id" in c_rules_dict): + # ob_id = c_rules_dict["ob_id"] + params['bitrate']=bitrate + params['dst']=dst + params['src']=src + params['ob_id']=ob_id + params['flow_id']=flow_id + params['bidir'] = bidir + + + tasks_scheduler = 
TasksScheduler(self.service_handler_factory) + tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) + tasks_scheduler.execute_all() + + ''' + + # Feed TaskScheduler with the service to update, the old connection to + # deconfigure and the new connection to configure. It will produce a + # schedule of tasks (an ordered list of tasks to be executed) to + # implement the requested changes. + tasks_scheduler = TasksScheduler(self.service_handler_factory) + #tasks_scheduler.compose_optical_service_update( + # updated_service, old_connection, service_new, new_connection) + tasks_scheduler.compose_optical_service_update( + service_new, old_connection, new_connection) + tasks_scheduler.execute_all() - # Find alternative connections - # pylint: disable=no-member - pathcomp_request = PathCompRequest() - pathcomp_request.services.append(updated_service_with_uuids) - #pathcomp_request.k_disjoint_path.num_disjoint = 100 - pathcomp_request.k_shortest_path.k_inspection = 100 - pathcomp_request.k_shortest_path.k_return = 3 - - LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) - pathcomp = PathCompClient() - pathcomp_reply = pathcomp.Compute(pathcomp_request) - pathcomp.close() - LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) - - if len(pathcomp_reply.services) == 0: - MSG = 'KDisjointPath reported no services for Service({:s}): {:s}' - str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) - raise NotImplementedException('kdisjointpath-no-services', extra_details=str_extra_details) + else: + # Find alternative connections + # pylint: disable=no-member + pathcomp_request = PathCompRequest() + pathcomp_request.services.append(updated_service_with_uuids) + #pathcomp_request.k_disjoint_path.num_disjoint = 100 + 
pathcomp_request.k_shortest_path.k_inspection = 100 + pathcomp_request.k_shortest_path.k_return = 3 + + LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) + pathcomp = PathCompClient() + pathcomp_reply = pathcomp.Compute(pathcomp_request) + pathcomp.close() + LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) + + if len(pathcomp_reply.services) == 0: + MSG = 'KDisjointPath reported no services for Service({:s}): {:s}' + str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) + raise NotImplementedException('kdisjointpath-no-services', extra_details=str_extra_details) + + if len(pathcomp_reply.services) > 1: + MSG = 'KDisjointPath reported subservices for Service({:s}): {:s}' + str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) + raise NotImplementedException('kdisjointpath-subservices', extra_details=str_extra_details) + + if len(pathcomp_reply.connections) == 0: + MSG = 'KDisjointPath reported no connections for Service({:s}): {:s}' + str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) + raise NotImplementedException('kdisjointpath-no-connections', extra_details=str_extra_details) + + # compute a string representing the old connection + str_old_connection = connection_to_string(old_connection) + + LOGGER.debug('old_connection={:s}'.format(grpc_message_to_json_string(old_connection))) + + candidate_new_connections = list() + for candidate_new_connection in pathcomp_reply.connections: + str_candidate_new_connection = 
connection_to_string(candidate_new_connection) + if str_candidate_new_connection == str_old_connection: continue + candidate_new_connections.append(candidate_new_connection) + + if len(candidate_new_connections) == 0: + MSG = 'Unable to find a new suitable path: pathcomp_request={:s} pathcomp_reply={:s} old_connection={:s}' + str_pathcomp_request = grpc_message_to_json_string(pathcomp_request) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_old_connection = grpc_message_to_json_string(old_connection) + extra_details = MSG.format(str_pathcomp_request, str_pathcomp_reply, str_old_connection) + raise OperationFailedException('no-new-path-found', extra_details=extra_details) + + str_candidate_new_connections = [ + grpc_message_to_json_string(candidate_new_connection) + for candidate_new_connection in candidate_new_connections + ] + LOGGER.debug('candidate_new_connections={:s}'.format(str(str_candidate_new_connections))) - if len(pathcomp_reply.services) > 1: - MSG = 'KDisjointPath reported subservices for Service({:s}): {:s}' - str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) - raise NotImplementedException('kdisjointpath-subservices', extra_details=str_extra_details) + new_connection = random.choice(candidate_new_connections) + LOGGER.debug('new_connection={:s}'.format(grpc_message_to_json_string(new_connection))) - if len(pathcomp_reply.connections) == 0: - MSG = 'KDisjointPath reported no connections for Service({:s}): {:s}' - str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) - raise NotImplementedException('kdisjointpath-no-connections', extra_details=str_extra_details) - - # compute a string representing the old 
connection - str_old_connection = connection_to_string(old_connection) - - LOGGER.debug('old_connection={:s}'.format(grpc_message_to_json_string(old_connection))) - - candidate_new_connections = list() - for candidate_new_connection in pathcomp_reply.connections: - str_candidate_new_connection = connection_to_string(candidate_new_connection) - if str_candidate_new_connection == str_old_connection: continue - candidate_new_connections.append(candidate_new_connection) - - if len(candidate_new_connections) == 0: - MSG = 'Unable to find a new suitable path: pathcomp_request={:s} pathcomp_reply={:s} old_connection={:s}' - str_pathcomp_request = grpc_message_to_json_string(pathcomp_request) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_old_connection = grpc_message_to_json_string(old_connection) - extra_details = MSG.format(str_pathcomp_request, str_pathcomp_reply, str_old_connection) - raise OperationFailedException('no-new-path-found', extra_details=extra_details) - - str_candidate_new_connections = [ - grpc_message_to_json_string(candidate_new_connection) - for candidate_new_connection in candidate_new_connections - ] - LOGGER.debug('candidate_new_connections={:s}'.format(str(str_candidate_new_connections))) - - new_connection = random.choice(candidate_new_connections) - LOGGER.debug('new_connection={:s}'.format(grpc_message_to_json_string(new_connection))) - - # Change UUID of new connection to prevent collisions - tmp_connection = Connection() - tmp_connection.CopyFrom(new_connection) - tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) - new_connection = tmp_connection - - # Feed TaskScheduler with the service to update, the old connection to - # deconfigure and the new connection to configure. It will produce a - # schedule of tasks (an ordered list of tasks to be executed) to - # implement the requested changes. 
- tasks_scheduler = TasksScheduler(self.service_handler_factory) - tasks_scheduler.compose_service_connection_update( - updated_service_with_uuids, old_connection, new_connection) - tasks_scheduler.execute_all() + # Change UUID of new connection to prevent collisions + tmp_connection = Connection() + tmp_connection.CopyFrom(new_connection) + tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) + new_connection = tmp_connection + + # Feed TaskScheduler with the service to update, the old connection to + # deconfigure and the new connection to configure. It will produce a + # schedule of tasks (an ordered list of tasks to be executed) to + # implement the requested changes. + tasks_scheduler = TasksScheduler(self.service_handler_factory) + tasks_scheduler.compose_service_connection_update( + updated_service_with_uuids, old_connection, new_connection) + tasks_scheduler.execute_all() return Empty() diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index 7f47f65fb..919fcb63e 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -34,7 +34,8 @@ from .tasks.Task_ServiceDelete import Task_ServiceDelete from .tasks.Task_ServiceSetStatus import Task_ServiceSetStatus from .TaskExecutor import CacheableObjectType, TaskExecutor from .tasks.Task_OpticalServiceConfigDelete import Task_OpticalServiceConfigDelete -from service.service.tools.OpticalTools import delete_lightpath +from service.service.tools.OpticalTools import DelFlexLightpath, delete_lightpath +from common.Constants import OpticalServiceType if TYPE_CHECKING: from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory @@ -176,6 +177,30 @@ class TasksScheduler: return optical_connection_configure_key + def _optical_connection_configure_simple(self, connection_id : ConnectionId + , service_id : ServiceId , + has_media_channel : bool, 
has_optical_band = True) -> str: + optical_connection_configure_key = self._add_task_if_not_exists(Task_OpticalConnectionConfigure( + self._executor, connection_id)) + + ''' + # the connection configuration depends on its connection's service being in planning state + service_planned_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PLANNED)) + self._dag.add(optical_connection_configure_key, service_planned_key) + ''' + + + # the connection's service depends on the connection configuration to transition to active state + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE)) + self._dag.add(service_active_key, optical_connection_configure_key) + + + return optical_connection_configure_key + + + def _optical_connection_deconfigure( self, connection_id : ConnectionId, service_id : ServiceId, has_media_channel : bool, has_optical_band = True @@ -196,6 +221,30 @@ class TasksScheduler: self._dag.add(service_delete_key, connection_deconfigure_key) return connection_deconfigure_key + + def _optical_connection_deconfigure_simple( + self, connection_id : ConnectionId, service_id : ServiceId, + has_media_channel : bool, has_optical_band = True + ) -> str: + connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + self._executor, connection_id, has_media_channel=has_media_channel + )) + ''' + # the connection deconfiguration depends on its connection's service being in removing state + service_pending_removal_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE + )) + self._dag.add(connection_deconfigure_key, service_pending_removal_key) + + + service_delete_key = self._add_task_if_not_exists(Task_OpticalServiceDelete( + self._executor, service_id, has_media_channel, has_optical_band + )) + 
self._dag.add(service_delete_key, connection_deconfigure_key) + ''' + + return connection_deconfigure_key + def _optical_service_config_remove( self, connection_id : ConnectionId, service_id : ServiceId @@ -396,20 +445,26 @@ class TasksScheduler: if str_item_key in explored_items: continue connections = self._context_client.ListConnections(item.service_id) has_media_channel,has_optical_band=self.check_service_for_media_channel(connections=connections,item=item.service_id) - + oc_type = 1 if len(service.service_config.config_rules) > 0: - - - reply,code = delete_lightpath( - params['src'] - ,params ['dst'] - , params['bitrate'] - , params['ob_id'] - ,delete_band=not has_media_channel - , flow_id= params['flow_id'] - ) + for constraint in service.service_constraints: + if "type" in constraint.custom.constraint_type: + oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) + if oc_type == 2 : + #flow_id, src, dst, bitrate + reply,code = delete_lightpath(params['flow_id'], params['src'], params['dst'], params['bitrate']) - + else: + reply,code = DelFlexLightpath( + params['src'] + , params ['dst'] + , params['bitrate'] + , params['ob_id'] + , delete_band=not has_media_channel + , flow_id= params['flow_id'] + ) + + if code == 400 and reply_not_allowed in reply : MSG = 'Deleteion for the service is not Allowed , Served Lightpaths is not empty' raise Exception(MSG) @@ -443,22 +498,19 @@ class TasksScheduler: self._add_connection_to_executor_cache(connection) pending_items_to_explore.put(connection) - - - - + explored_items.add(str_item_key) elif isinstance(item, Connection): - - if code == 400 and reply_not_allowed in reply:break - str_item_key = grpc_message_to_json_string(item.connection_id) if str_item_key in explored_items: continue - connection_key = include_connection(item.connection_id, item.service_id,has_media_channel=has_media_channel,has_optical_band=has_optical_band) + connection_key = include_connection( item.connection_id + , item.service_id + 
, has_media_channel=has_media_channel + , has_optical_band=has_optical_band ) self._add_connection_to_executor_cache(connection) if include_service_config is not None : @@ -469,9 +521,7 @@ class TasksScheduler: if has_optical_band and is_media_channel: include_service_config(item.connection_id - , item.service_id - - ) + , item.service_id ) self._executor.get_service(item.service_id) @@ -479,7 +529,9 @@ class TasksScheduler: for sub_service_id in item.sub_service_ids: - _,service_key_done = include_service(sub_service_id,has_media_channel=has_media_channel,has_optical_band=has_optical_band) + _,service_key_done = include_service(sub_service_id + ,has_media_channel=has_media_channel + ,has_optical_band=has_optical_band) self._executor.get_service(sub_service_id) self._dag.add(service_key_done, connection_key) pending_items_to_explore.put(sub_service_id) @@ -497,6 +549,7 @@ class TasksScheduler: LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) + def compose_from_service(self, service : Service, is_delete : bool = False) -> None: t0 = time.time() include_service = self._service_remove if is_delete else self._service_create @@ -560,6 +613,96 @@ class TasksScheduler: t1 = time.time() LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) + + def compose_optical_service_update( + self, service : Service, old_connection : Connection, new_connection : Connection + ) -> None: + t0 = time.time() + + self._add_service_to_executor_cache(service) + self._add_connection_to_executor_cache(old_connection) + self._add_connection_to_executor_cache(new_connection) + + service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING + )) + + old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + self._executor, old_connection.connection_id, old_connection.service_id + )) + + new_connection_configure_key = 
self._add_task_if_not_exists(Task_OpticalConnectionConfigure( + self._executor, new_connection.connection_id + )) + + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE + )) + + # the old connection deconfiguration depends on service being in updating state + self._dag.add(old_connection_deconfigure_key, service_updating_key) + + # the new connection configuration depends on service being in updating state + self._dag.add(new_connection_configure_key, service_updating_key) + + # the new connection configuration depends on the old connection having been deconfigured + self._dag.add(new_connection_configure_key, old_connection_deconfigure_key) + + # re-activating the service depends on the service being in updating state before + self._dag.add(service_active_key, service_updating_key) + + # re-activating the service depends on the new connection having been configured + self._dag.add(service_active_key, new_connection_configure_key) + + t1 = time.time() + LOGGER.debug('[RRERRSF] elapsed_time: {:f} sec'.format(t1-t0)) + + + def compose_optical_service_update1( + self, service : Service, old_connection : Connection, new_connection : Connection + ) -> None: + LOGGER.debug('[ttttttttttt] elapsed_time inside update1') + t0 = time.time() + + self._add_service_to_executor_cache(service) + #self._add_connection_to_executor_cache(old_connection) + self._add_connection_to_executor_cache(new_connection) + + service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING + )) + + #old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + # self._executor, old_connection.connection_id, old_connection.service_id + #)) + + new_connection_configure_key = self._add_task_if_not_exists(Task_OpticalConnectionConfigure( + self._executor, 
new_connection.connection_id + )) + + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE + )) + + # the old connection deconfiguration depends on service being in updating state + #self._dag.add(old_connection_deconfigure_key, service_updating_key) + + # the new connection configuration depends on service being in updating state + self._dag.add(new_connection_configure_key, service_updating_key) + + # the new connection configuration depends on the old connection having been deconfigured + #self._dag.add(new_connection_configure_key, old_connection_deconfigure_key) + + # re-activating the service depends on the service being in updating state before + self._dag.add(service_active_key, service_updating_key) + + # re-activating the service depends on the new connection having been configured + self._dag.add(service_active_key, new_connection_configure_key) + + t1 = time.time() + LOGGER.debug('[RRERRSF] elapsed_time: {:f} sec'.format(t1-t0)) + + def compose_service_connection_update( self, service : Service, old_connection : Connection, new_connection : Connection ) -> None: diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 55bb6d242..1a44f4812 100644 --- a/src/service/service/tools/OpticalTools.py +++ b/src/service/service/tools/OpticalTools.py @@ -120,6 +120,23 @@ def refresh_opticalcontroller(topology_id : dict): log.debug(f"GetTopology Response {res}") +def reconfig_flex_lightpath(flow_id) -> str: + if not TESTING: + urlx = "" + headers = {"Content-Type": "application/json"} + base_url = get_optical_controller_base_url() + urlx = "{:s}/ReconfigFlexLightpath/{}".format(base_url, flow_id) + r = requests.put(urlx, headers=headers) + print(f"reconfig {r}") + reply = r.text + return reply + else: + if bidir is not None: + if bidir == 0: + return reply_uni_txt + return reply_bid_txt + + def add_flex_lightpath(src, dst, 
bitrate, bidir, ob_band) -> str: if not TESTING: urlx = "" @@ -177,7 +194,7 @@ def get_optical_band(idx) -> str: return optical_band_uni_txt -def delete_lightpath( src, dst, bitrate, ob_id, delete_band, flow_id=None) -> str: +def DelFlexLightpath( src, dst, bitrate, ob_id, delete_band, flow_id=None) -> str: reply = "200" delete_band = 1 if delete_band else 0 base_url = get_optical_controller_base_url() @@ -194,15 +211,16 @@ def delete_lightpath( src, dst, bitrate, ob_id, delete_band, flow_id=None) -> st code = r.status_code return (reply, code) -def DelFlexLightpath (flow_id, src, dst, bitrate, o_band_id): +def delete_lightpath(flow_id, src, dst, bitrate): reply = "200" base_url = get_optical_controller_base_url() if not TESTING: - urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}/{}".format(base_url, flow_id, src, dst, bitrate, o_band_id) + urlx = "{:s}/DelLightpath/{}/{}/{}/{}".format(base_url, flow_id, src, dst, bitrate) headers = {"Content-Type": "application/json"} r = requests.delete(urlx, headers=headers) reply = r.text - return reply + code = r.status_code + return (reply, code) def get_lightpaths() -> str: base_url = get_optical_controller_base_url() @@ -213,6 +231,135 @@ def get_lightpaths() -> str: reply = r.text return reply +def adapt_reply_ob(devices, service, reply_json, context_id, topology_id, optical_band_txt) -> PathCompReply: + opt_reply = PathCompReply() + topo = TopologyId( + context_id=ContextId(context_uuid=Uuid(uuid=context_id)), + topology_uuid=Uuid(uuid=topology_id) + ) + #add optical band connection first + rules_ob= [] + ob_id = 0 + connection_ob=None + + r = reply_json + if "optical_band_id" in r.keys(): + ob_id = r["optical_band_id"] + if "bidir" in r.keys(): + bidir_f = r["bidir"] + else: + bidir_f = False + if optical_band_txt != "": + ob_json = json.loads(optical_band_txt) + ob = ob_json + connection_ob = add_connection_to_reply(opt_reply) + uuuid_x = str(uuid.uuid4()) + connection_ob.connection_id.connection_uuid.uuid = uuuid_x + 
connection_ob.service_id.CopyFrom(service.service_id) + obt = ob["band_type"] + if obt == "l_slots": + band_type = "L_BAND" + elif obt == "s_slots": + band_type = "S_BAND" + else: + band_type = "C_BAND" + + freq = ob["freq"] + bx = ob["band"] + #+1 is added to avoid overlap in the WSS of MGONs + lf = int(int(freq)-int(bx/2))+1 + uf = int(int(freq)+int(bx/2)) + val_ob = { + "band_type" : band_type, + "low-freq" : lf, + "up-freq" : uf, + "frequency" : freq, + "band" : bx, + "ob_id" : ob_id, + "bidir" : bidir_f + } + rules_ob.append(ConfigRule_Custom(resource_key="/settings-ob_{}".format(uuuid_x), resource_value=json.dumps(val_ob))) + bidir_ob = ob["bidir"] + # in case the service is built upon existed optical band , don't clacluate the endpoints of it + for devxb in ob["flows"].keys(): + log.debug("optical-band device {}".format(devxb)) + in_end_point_b = "0" + out_end_point_b = "0" + in_end_point_f = ob["flows"][devxb]["f"]["in"] + out_end_point_f = ob["flows"][devxb]["f"]["out"] + log.debug("optical-band ports {}, {}".format(in_end_point_f, out_end_point_f)) + if bidir_ob: + in_end_point_b = ob["flows"][devxb]["b"]["in"] + out_end_point_b = ob["flows"][devxb]["b"]["out"] + log.debug("optical-band ports {}, {}".format(in_end_point_b, out_end_point_b)) + #if (in_end_point_f == "0" or out_end_point_f == "0") and (in_end_point_b == "0" or out_end_point_b == "0"): + if in_end_point_f != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_f) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, in_end_point_f)) + + if out_end_point_f != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_f) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, 
device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, out_end_point_f)) + if in_end_point_b != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_b) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, in_end_point_b)) + if out_end_point_b != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_b) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, out_end_point_b)) + log.debug("optical-band connection {}".format(connection_ob)) + #check that list of endpoints is not empty + if connection_ob is not None and len(connection_ob.path_hops_endpoint_ids) == 0: + log.debug("deleting empty optical-band connection") + opt_reply.connections.remove(connection_ob) + + ''' + #inizialize custom optical parameters + band = r["band"] if "band" in r else None + op_mode = r["op-mode"] if "op-mode" in r else None + frequency = r["freq"] if "freq" in r else None + flow_id = r["flow_id"] if "flow_id" in r else None + r_type = r["band_type"] if "band_type" in r else None + if r_type == "l_slots": + band_type = "L_BAND" + elif r_type == "s_slots": + band_type = "S_BAND" + else: + band_type = "C_BAND" + if ob_id != 0: + val = {"target-output-power": "1.0", "frequency": frequency, "operational-mode": op_mode, "band": band, "flow_id": flow_id, "ob_id": ob_id, "band_type": band_type, "bidir": bidir_f} + 
else: + val = {"target-output-power": "1.0", "frequency": frequency, "operational-mode": op_mode, "band": band, "flow_id": flow_id, "band_type": band_type, "bidir": bidir_f} + custom_rule = ConfigRule_Custom(resource_key="/settings", resource_value=json.dumps(val)) + rule = ConfigRule(action=ConfigActionEnum.CONFIGACTION_SET, custom=custom_rule) + service.service_config.config_rules.add().CopyFrom(rule) + ''' + + if len(rules_ob) > 0: + for rulex in rules_ob: + rule_ob = ConfigRule(action=ConfigActionEnum.CONFIGACTION_SET, custom=rulex) + service.service_config.config_rules.add().CopyFrom(rule_ob) + + opt_reply.services.add().CopyFrom(service) + return opt_reply + + + def adapt_reply(devices, service, reply_json, context_id, topology_id, optical_band_txt) -> PathCompReply: opt_reply = PathCompReply() topo = TopologyId( diff --git a/src/service/tests/test_recon.py b/src/service/tests/test_recon.py new file mode 100644 index 000000000..97acdf7e2 --- /dev/null +++ b/src/service/tests/test_recon.py @@ -0,0 +1,101 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, pytest +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, Service +#from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = 'src/service/tests/descriptors_recompute_conns.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + _client.close() + + +def test_service_recompute_connection( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +) -> None: + + # ===== Setup scenario ============================================================================================= + #validate_empty_scenario(context_client) + + # Load descriptors and validate the base scenario + #descriptor_loader = DescriptorLoader( + # descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client, + # service_client=service_client) + #results = descriptor_loader.process() + #check_descriptor_load_results(results, descriptor_loader) + #descriptor_loader.validate() + + + # ===== Recompute Connection 
======================================================================================= + response = context_client.ListServices(ADMIN_CONTEXT_ID) + print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + #assert len(response.services) == 1 + for service in response.services: + #service = response.services[0] + service_id = service.service_id + name = service.name + print(name) + + if name == "optical-connection1": + response = context_client.ListConnections(service_id) + print("AAAAAAAAA") + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # 1 connection per service + str_old_connections = grpc_message_to_json_string(response) + + # Change path first time + request = Service() + request.CopyFrom(service) + del request.service_endpoint_ids[:] # pylint: disable=no-member + del request.service_constraints[:] # pylint: disable=no-member + del request.service_config.config_rules[:] # pylint: disable=no-member + service_client.RecomputeConnections(request) + + response = context_client.ListConnections(service_id) + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # 1 connection per service + str_new_connections = grpc_message_to_json_string(response) + print(' new connection => {:s}'.format(str_new_connections)) + + -- GitLab From 6ef57ac5534c91fe68fdaba47edc04677f903850 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Thu, 23 Oct 2025 23:19:59 +0200 Subject: [PATCH 03/41] bugfix delete empty service --- src/service/service/ServiceServiceServicerImpl.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/service/service/ServiceServiceServicerImpl.py 
b/src/service/service/ServiceServiceServicerImpl.py index eeb2cd4a6..5aa56c7e7 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -416,7 +416,8 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): 'flow_id' : None } devs = [] - + src = "" + dst = "" context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) topology_id_x = json_topology_id( DEFAULT_TOPOLOGY_NAME, context_id_x) @@ -425,8 +426,11 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): devices = topology_details.devices for endpoint_id in service.service_endpoint_ids: devs.append(endpoint_id.device_id.device_uuid.uuid) - src = get_device_name_from_uuid(devices, devs[0]) - dst = get_device_name_from_uuid(devices, devs[1]) + if len(devs) == 2: + src = get_device_name_from_uuid(devices, devs[0]) + dst = get_device_name_from_uuid(devices, devs[1]) + else: + print("empty service") bitrate = 100 bidir = 0 oc_type = 1 -- GitLab From dfa3e03563c1eb92acb832967dc669648d7e48dc Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Fri, 31 Oct 2025 17:06:32 +0100 Subject: [PATCH 04/41] working with disjoint and multiple bands --- src/opticalcontroller/OpticalController.py | 14 +- src/opticalcontroller/RSA.py | 62 ++++++- src/opticalcontroller/dijkstra.py | 169 ++++++++++++++++-- .../service/ServiceServiceServicerImpl.py | 14 +- src/service/service/tools/OpticalTools.py | 8 +- 5 files changed, 233 insertions(+), 34 deletions(-) diff --git a/src/opticalcontroller/OpticalController.py b/src/opticalcontroller/OpticalController.py index 075c8a42b..042461f5e 100644 --- a/src/opticalcontroller/OpticalController.py +++ b/src/opticalcontroller/OpticalController.py @@ -70,23 +70,25 @@ class AddLightpath(Resource): #@optical.route('/AddFlexLightpath///') @optical.route('/AddFlexLightpath///', - defaults={"bidir": 1, "band": None}) + defaults={"bidir": 1, "band": None, "obx_idx": None}) @optical.route('/AddFlexLightpath////', - defaults={"band": None}) 
-@optical.route('/AddFlexLightpath/////',) + defaults={"band": None, "obx_idx": None}) +@optical.route('/AddFlexLightpath/////', + defaults={"obx_idx": None}) +@optical.route('/AddFlexLightpath//////') @optical.response(200, 'Success') @optical.response(404, 'Error, not found') class AddFlexLightpath(Resource): @staticmethod - def put(src, dst, bitrate, bidir=1, band=None): + def put(src, dst, bitrate, bidir=1, band=None, obx_idx = None): - print("INFO: New FlexLightpath request from {} to {} with rate {} and band {}".format(src, dst, bitrate, band)) + print("INFO: New MGON request from {} to {} with rate {} and band {}".format(src, dst, bitrate, band)) t0 = time.time()*1000.0 #if debug: # rsa.g.printGraph() if rsa is not None: - flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band) + flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band, obx_idx) if flow_id is not None: if rsa.db_flows[flow_id]["op-mode"] == 0: return 'No path found', 404 diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 861afeb01..1dedc2b77 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -147,7 +147,31 @@ class RSA(): self.g.reset_graph() return links, path - def get_slots(self, links, slots, optical_band_id=None): + def compute_disjoint_path(self, src, dst, path1=None): + if path1 == None: + path1 = shortest_path(self.g, self.g.get_vertex(src), self.g.get_vertex(dst)) + path = disjoint_path(self.g, src, dst, path1, False) + print("INFO: Path from {} to {} with distance: {}".format(src, dst, self.g.get_vertex(dst).get_distance())) + if debug: + print(path) + links = [] + for i in range(0, len(path) - 1): + s = path[i] + if debug: + print(s) + if i < len(path) - 1: + d = path[i + 1] + link_id = "{}-{}".format(s, d) + if debug: + #print(link_id, self.links_dict[link_id]) + print(link_id, self.get_link_by_name(link_id)) + + links.append(link_id) + self.g.reset_graph() + return links, 
path + + + def get_slots(self, links, slots, optical_band_id=None, old_band_x=None): if isinstance(slots, int): val_c = slots @@ -246,6 +270,7 @@ class RSA(): l_slots[l] = combine(l_slots[l], consecutives(fib["l_slots"], val_l)) l_found = 1''' if optical_band_id is not None: + print(f"NEW_DISJOINT: {self.optical_bands[optical_band_id]}") if "c_slots" in self.optical_bands[optical_band_id].keys(): if len(self.optical_bands[optical_band_id]["c_slots"]) > 0: a_c = c_sts @@ -269,7 +294,15 @@ class RSA(): s_sts = common_slots(a_s, b_s) else: s_sts = [] - + if old_band_x == "c_slots": + c_sts = [] + l_sts = [] + if old_band_x == "l_slots": + c_sts = [] + l_sts = [] + if old_band_x == "s_slots": + s_sts = [] + return c_sts, l_sts, s_sts def update_link(self, fib, slots, band): @@ -336,7 +369,7 @@ class RSA(): #update_optical_band(optical_bands=self.optical_bands,optical_band_id=optical_band_id,band=band,link=link) - def del_flow(self, flow,flow_id, o_b_id = None): + def del_flow(self, flow, flow_id, o_b_id = None): flows = flow["flows"] band = flow["band_type"] slots = flow["slots"] @@ -839,7 +872,7 @@ class RSA(): #self.db_flows[flow_id]["parent_opt_band"] = 0 #self.db_flows[flow_id]["new_optical_band"] = 0 - def create_optical_band(self, links, path, bidir, num_slots): + def create_optical_band(self, links, path, bidir, num_slots, old_band_x=None): print("INFO: Creating optical-band of {} slots".format(num_slots)) if self.opt_band_id == 0: self.opt_band_id += 1 @@ -888,8 +921,7 @@ class RSA(): if bidir: self.optical_bands[back_opt_band_id]["src"] = path[-1] ''' - - c_slots, l_slots, s_slots = self.get_slots(links, num_slots) + c_slots, l_slots, s_slots = self.get_slots(links, num_slots, optical_band_id=None, old_band_x=old_band_x) if debug: print(c_slots) print(l_slots) @@ -991,7 +1023,7 @@ class RSA(): result.append(ob_id) return result - def rsa_fs_computation(self, src, dst, rate, bidir, band): + def rsa_fs_computation(self, src, dst, rate, bidir, band, bandx_id): if 
band is not None: num_slots_ob = map_band_to_slot(band) print(band, num_slots_ob) @@ -999,11 +1031,23 @@ class RSA(): num_slots_ob = "full_band" if self.nodes_dict[src]["type"] == "OC-ROADM" and self.nodes_dict[dst]["type"] == "OC-ROADM": print("INFO: ROADM to ROADM connection") - links, path = self.compute_path(src, dst) + old_band_x = None + if bandx_id != None: + if bandx_id in self.optical_bands.keys(): + path_x = self.optical_bands[bandx_id]["path"] + old_band_x = self.optical_bands[bandx_id]["band_type"] + links, path = self.compute_disjoint_path(src, dst, path_x) + else: + links, path = self.compute_disjoint_path(src, dst, None) + if len(path) < 1: + print("INFO: no disjoint path found, installing in the shortest path") + links, path = self.compute_path(src, dst) + else: + links, path = self.compute_path(src, dst) if len(path) < 1: self.null_values_ob(self.opt_band_id) return self.opt_band_id, [] - optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob) + optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob, old_band_x) return None, optical_band_id print("INFO: TP to TP connection") if self.flow_id == 0: diff --git a/src/opticalcontroller/dijkstra.py b/src/opticalcontroller/dijkstra.py index 2657990cf..68180e427 100644 --- a/src/opticalcontroller/dijkstra.py +++ b/src/opticalcontroller/dijkstra.py @@ -129,9 +129,20 @@ class Graph: self.vert_dict[frm].add_neighbor(self.vert_dict[to], [port_frm, w]) self.vert_dict[to].add_neighbor(self.vert_dict[frm], [port_to, w]) + ''' def del_edge(self, frm, to, cost = 0): self.vert_dict[frm].del_neighbor(self.vert_dict[to]) self.vert_dict[to].del_neighbor(self.vert_dict[frm]) + ''' + + def del_edge(self, frm, to, cost=0): + if frm in self.vert_dict and to in self.vert_dict: + v_from = self.vert_dict[frm] + v_to = self.vert_dict[to] + if v_to in v_from.adjacent: + v_from.del_neighbor(v_to) + if v_from in v_to.adjacent: + v_to.del_neighbor(v_from) def 
get_vertices(self): return self.vert_dict.keys() @@ -142,6 +153,45 @@ class Graph: def get_previous(self, current): return self.previous + def copy(self): + """ + Returns a deep copy of the graph (vertices, edges, ports, and weights). + """ + new_graph = Graph() + + # First, create all vertices + for node_id in self.vert_dict: + new_graph.add_vertex(node_id) + + # Then, add all edges with the same attributes + for v in self: + for neighbor in v.get_connections(): + frm = v.get_id() + to = neighbor.get_id() + port_frm = v.get_port(neighbor) + port_to = neighbor.get_port(v) + weight = v.get_weight(neighbor) + + # To avoid adding the same undirected edge twice + if frm < to: + new_graph.add_edge(frm, to, port_frm, port_to, weight) + + return new_graph + + def copy2(self): + new_g = Graph() + # Copy vertices + for node_id in self.vert_dict: + new_g.add_vertex(node_id) + # Copy edges + for frm in self.vert_dict: + for to in self.vert_dict[frm].adjacent: + port_frm, weight = self.vert_dict[frm].adjacent[to] + port_to, _ = self.vert_dict[to].adjacent[frm] + if not new_g.get_vertex(frm).adjacent.get(new_g.get_vertex(to)): + new_g.add_edge(frm, to.get_id(), port_frm, port_to, weight) + return new_g + def shortest(v, path): if v.previous: path.append(v.previous.get_id()) @@ -198,6 +248,90 @@ def shortest_path(graph, src, dst): shortest(target, path) return path[::-1] + +def compute_disjoint_paths(graph, src, dst, k=2, disjoint_type="link", debug=False): + """ + Compute up to k disjoint shortest paths between src and dst using Dijkstra. 
+ disjoint_type: "link" (edge-disjoint) or "node" (vertex-disjoint) + """ + + paths = [] + removed_edges = [] # Keep track of removed edges + removed_nodes = [] # Keep track of removed nodes + + for i in range(k): + # Compute shortest path using the existing Dijkstra-based function + path = shortest_path(graph, src, dst) + + # Stop if no valid path found + if not path or len(path) < 2: + if debug: + print(f"[INFO] No more disjoint paths found after {i} iterations.") + break + + paths.append(path) + if debug: + print(f"[INFO] Path {i+1}: {path}") + + # Depending on disjointness type, remove edges or nodes from graph + if disjoint_type == "link": + for u, v in zip(path[:-1], path[1:]): + if debug: + print(f" Removing edge {u}-{v}") + removed_edges.append((u, v)) + # Remove edge in both directions + graph.del_edge(u, v) + + elif disjoint_type == "node": + # Remove intermediate nodes (not source or destination) + for n in path[1:-1]: + if debug: + print(f" Removing node {n}") + removed_nodes.append(n) + # Remove all edges involving this node + v = graph.get_vertex(n) + if v is not None: + for neighbor in list(v.get_connections()): + graph.del_edge(n, neighbor.get_id()) + graph.del_Vertex(n) + else: + raise ValueError("disjoint_type must be 'link' or 'node'") + + # Reset distances & visited flags for the next run + graph.reset_graph() + + if debug: + print(f"[INFO] Found {len(paths)} disjoint paths.") + + return paths + +def disjoint_path(graph, src_id, dst_id, pathz, debug=False): + g2 = graph.copy() + src = g2.get_vertex(src_id) + dst = g2.get_vertex(dst_id) + removed_edges = [] # Keep track of removed edges + removed_nodes = [] # Keep track of removed nodes + for u, v in zip(pathz[:-1], pathz[1:]): + if debug: + print(f" Removing edge {u}-{v}") + removed_edges.append((u, v)) + # Remove edge in both directions + g2.del_edge(u, v) + # Compute shortest path using the existing Dijkstra-based function + g2.reset_graph() + pathx = shortest_path(g2, src, dst) + + # Stop if 
no valid path found
+    if not pathx or len(pathx) < 2:
+        if debug:
+            print(f"[INFO] No more disjoint paths found.")
+        return []
+
+    g2.reset_graph()
+
+    return pathx
+
+
 if __name__ == '__main__':
     print("Testing Algo")
@@ -210,15 +344,15 @@ if __name__ == '__main__':
     g.add_vertex('e')
     g.add_vertex('f')
 
-    g.add_edge('a', 'b', 7)
-    g.add_edge('a', 'c', 9)
-    g.add_edge('a', 'f', 14)
-    g.add_edge('b', 'c', 10)
-    g.add_edge('b', 'd', 15)
-    g.add_edge('c', 'd', 11)
-    g.add_edge('c', 'f', 2)
-    g.add_edge('d', 'e', 6)
-    g.add_edge('e', 'f', 9)
+    g.add_edge('a', 'b', 1, 1, 7)
+    g.add_edge('a', 'c', 2, 1, 9)
+    g.add_edge('a', 'f', 3, 1, 14)
+    g.add_edge('b', 'c', 2, 2, 10)
+    g.add_edge('b', 'd', 3, 1, 15)
+    g.add_edge('c', 'd', 3, 2, 11)
+    g.add_edge('c', 'f', 4, 2, 2)
+    g.add_edge('d', 'e', 4, 1, 6)
+    g.add_edge('e', 'f', 2, 3, 9)
 
 
     """print ('Graph data:')
@@ -235,6 +369,17 @@ if __name__ == '__main__':
         path = [target.get_id()]
         shortest(target, path)
         print ('The shortest path : %s' %(path[::-1]))"""
-    
-    p = shortest_path(g, g.get_vertex('a'), g.get_vertex('e'))
-    print(p)
+    #print(g.printGraph())
+    pat = shortest_path(g, g.get_vertex('a'), g.get_vertex('e'))
+    print(pat)
+
+    #paths = compute_disjoint_paths(g, g.get_vertex('a'), g.get_vertex('e'), k=2, disjoint_type="link", debug=False)
+    #paths = compute_disjoint_paths(g, g.get_vertex('a'), g.get_vertex('e'), k=2, disjoint_type="link", debug=False)
+    #print(paths)
+    path2 = disjoint_path(g, 'a', 'e', pat, False)
+    print(path2)
+
+    pat = shortest_path(g, g.get_vertex('a'), g.get_vertex('d'))
+    print(pat)
+    path2 = disjoint_path(g, 'a', 'd', pat, False)
+    print(path2)
\ No newline at end of file
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 5aa56c7e7..0c0f0f0dc 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -288,6 +288,7 @@ class 
ServiceServiceServicerImpl(ServiceServiceServicer): ob_band = None oc_type = 1 bitrate = 100 + dj_optical_band_id = None for constraint in service.service_constraints: if "bandwidth" in constraint.custom.constraint_type: bitrate = int(float(constraint.custom.constraint_value)) @@ -296,19 +297,22 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): elif "optical-band-width" in constraint.custom.constraint_type: ob_band = int(constraint.custom.constraint_value) elif "type" in constraint.custom.constraint_type: - logging.info(f"TEEEEEEEEEEEEEEST {constraint.custom.constraint_type}={constraint.custom.constraint_value}") + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) - logging.info(f"TEEEEEEEEEEEEEEST {oc_type}") - + logging.info(f"{oc_type}") + elif "disjoint_optical_band_id" in constraint.custom.constraint_type: + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + dj_optical_band_id = int(constraint.custom.constraint_value) + logging.info(f"{dj_optical_band_id}") reply_txt = "" # to get the reply form the optical module #multi-granular if oc_type == 1: - reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band) + reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) elif oc_type == 2: reply_txt = add_lightpath(src, dst, bitrate, bidir) else: - reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band) + reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) logging.info(f"TEEEEEEEEEEEEEEST {oc_type}") logging.info(f"POLIMI {reply_txt}") if reply_txt == None: diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 1a44f4812..d5219fe52 100644 --- a/src/service/service/tools/OpticalTools.py +++ b/src/service/service/tools/OpticalTools.py @@ -137,11 +137,12 @@ def 
reconfig_flex_lightpath(flow_id) -> str: return reply_bid_txt -def add_flex_lightpath(src, dst, bitrate, bidir, ob_band) -> str: +def add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) -> str: if not TESTING: urlx = "" headers = {"Content-Type": "application/json"} base_url = get_optical_controller_base_url() + if ob_band is None: if bidir is None: bidir = 1 @@ -149,7 +150,10 @@ def add_flex_lightpath(src, dst, bitrate, bidir, ob_band) -> str: else: if bidir is None: bidir = 1 - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir), str(ob_band)) + if dj_optical_band_id is None: + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir), str(ob_band)) + else: + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir), str(ob_band), str(dj_optical_band_id)) r = requests.put(urlx, headers=headers) print(f"addpathlight {r}") reply = r.text -- GitLab From 71b2c55f7dae0c43dece90282fb21d6784fbfc37 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Sun, 2 Nov 2025 21:05:52 +0100 Subject: [PATCH 05/41] working unidir with bug fix --- src/opticalcontroller/RSA.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 1dedc2b77..38dee2e92 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -1236,7 +1236,7 @@ class RSA(): print("INFO: RSA completed for FLex Lightpath with new OB") if flow_list is None: self.null_values(self.flow_id) - return flow_id, optical_band_id + return self.flow_id, optical_band_id slots_i = [] for i in slots: slots_i.append(int(i)) -- GitLab From 0cd1a98366c986001b79449ca8b06171a127b6a8 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Tue, 4 Nov 2025 09:38:59 +0100 Subject: [PATCH 06/41] not working bidir --- my_deploy.sh | 4 +-- src/context/Dockerfile | 2 +- 
src/opticalcontroller/RSA.py | 30 ++++++++++++------ src/service/Dockerfile | 2 +- .../service/ServiceServiceServicerImpl.py | 31 ++++--------------- .../service_handlers/oc/OCServiceHandler.py | 1 + .../service/task_scheduler/TaskScheduler.py | 14 ++++++--- src/service/service/tools/OpticalTools.py | 8 ++--- 8 files changed, 45 insertions(+), 47 deletions(-) diff --git a/my_deploy.sh b/my_deploy.sh index 86c1a86f4..90fe39c75 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -20,7 +20,7 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device pathcomp service nbi webui" +export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui" # Uncomment to activate Monitoring (old) #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" @@ -140,7 +140,7 @@ export CRDB_PASSWORD="tfs123" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="" +export CRDB_DROP_DATABASE_IF_EXISTS="YES" # Disable flag for re-deploying CockroachDB from scratch. 
export CRDB_REDEPLOY="" diff --git a/src/context/Dockerfile b/src/context/Dockerfile index add63fe65..09c4b6115 100644 --- a/src/context/Dockerfile +++ b/src/context/Dockerfile @@ -28,7 +28,7 @@ ENV PYTHONUNBUFFERED=0 # chmod +x /bin/grpc_health_probe # Get generic Python packages -RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade 'pip==25.2' RUN python3 -m pip install --upgrade setuptools wheel RUN python3 -m pip install --upgrade pip-tools diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 04f67bf0e..f1d2a1a0b 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -16,10 +16,13 @@ import logging from opticalcontroller.dijkstra import * from opticalcontroller.tools import * from opticalcontroller.variables import * + +''' LOGGER = logging.getLogger(__name__) def print(*args) -> None: LOGGER.info(' '.join([str(a) for a in args])) +''' class RSA(): def __init__(self, nodes, links): @@ -470,9 +473,9 @@ class RSA(): print(f"del_flow_fib {fib } and band {band}") print(f"del_flow { str_list_to_int(fib[band].keys())}") - print(f"invoking restore_link_2 fib: {fib} , slots {slots} , band {band} ") - #self.restore_link(fib, slots, band) - self.restore_link_2(fib, slots, band, link = link) + print(f"invoking restore_link fib: {fib} , slots {slots} , band {band} ") + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link = link) self.optical_bands[o_b_id]["is_active"]=False @@ -492,7 +495,8 @@ class RSA(): fib = rlink["optical_details"] #fib = self.get_link_by_name(r_l)["optical_details"] if list_in_list(slots, str_list_to_int(fib[band].keys())): - self.restore_link_2(fib, slots, band, link=rlink) + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link=rlink) if debug: print(fib[band]) #changed according to TFS development @@ -1261,8 +1265,8 @@ class RSA(): for l in links: link = self.get_link_by_name(l) fib = link["optical_details"] - 
#self.restore_link(fib, slots, band) - self.restore_link_2(fib, slots, band, link=link) + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link=link) if o_b_id is not None: if debug: print("restoring OB") @@ -1282,8 +1286,8 @@ class RSA(): fib = rlink["optical_details"] #fib = self.get_link_by_name(r_l)["optical_details"] if list_in_list(slots, str_list_to_int(fib[band].keys())): - #self.restore_link(fib, slots, band, link=l) - self.restore_link_2(fib, slots, band, link=rlink) + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link=rlink) if debug: print(fib[band]) @@ -1291,7 +1295,7 @@ class RSA(): def rsa_fs_recomputation(self, flow_idy): flow_idx = int(flow_idy) - print(f"INFO: Reconifguring connection {flow_idx}") + print(f"INFO: Reconfiguring connection {flow_idx}") if flow_idx not in self.db_flows.keys(): print(f"ERROR: key not present {flow_idx}") else: @@ -1336,15 +1340,23 @@ class RSA(): c_slots, l_slots, s_slots = self.get_slots(links, num_slots, ob_id) if debug: + print("OFC26 available slots pre") print(c_slots) print(l_slots) print(s_slots) if band_type == "c_slots": c_slots = [] + l_slots =[] elif band_type == "l_slots": + c_slots = [] l_slots = [] elif band_type == "s_slots": s_slots = [] + if debug: + print("OFC26 available slots after reset due to band") + print(c_slots) + print(l_slots) + print(s_slots) if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs(links, num_slots, c_slots, diff --git a/src/service/Dockerfile b/src/service/Dockerfile index 493094769..8b5101877 100644 --- a/src/service/Dockerfile +++ b/src/service/Dockerfile @@ -28,7 +28,7 @@ ENV PYTHONUNBUFFERED=0 # chmod +x /bin/grpc_health_probe # Get generic Python packages -RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade 'pip==25.2' RUN python3 -m pip install --upgrade setuptools wheel 
RUN python3 -m pip install --upgrade pip-tools diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index a346a134b..ac5e8afd8 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -454,6 +454,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ''' #multi-granular if oc_type == 1: + LOGGER.info(f"DEVELOP: deleting multi-granular service") if len(service.service_config.config_rules) > 0: c_rules_dict = json.loads( service.service_config.config_rules[0].custom.resource_value @@ -472,12 +473,12 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): params['ob_id' ] = ob_id params['flow_id'] = flow_id params['bidir' ] = bidir - - + LOGGER.info(f"DEVELOP mg: {params}") tasks_scheduler = TasksScheduler(self.service_handler_factory) tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) tasks_scheduler.execute_all() return Empty() + #flexigrid elif oc_type ==2 : if len(service.service_config.config_rules) > 0: @@ -493,31 +494,9 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): params['bitrate']=bitrate params['dst']=dst params['src']=src - params['ob_id']=ob_id params['flow_id']=flow_id params['bidir'] = bidir - - - tasks_scheduler = TasksScheduler(self.service_handler_factory) - tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) - tasks_scheduler.execute_all() - return Empty() - elif oc_type ==2 : - if len(service.service_config.config_rules) > 0: - c_rules_dict = json.loads( - service.service_config.config_rules[0].custom.resource_value) - ob_id=None - flow_id=None - - if ("flow_id" in c_rules_dict): - flow_id = c_rules_dict["flow_id"] - #if ("ob_id" in c_rules_dict): - # ob_id = c_rules_dict["ob_id"] - params['bitrate']=bitrate - params['dst']=dst - params['src']=src - params['flow_id']=flow_id - params['bidir'] = bidir + LOGGER.info(f"DEVELOP 
flexgrid: {params}") tasks_scheduler = TasksScheduler(self.service_handler_factory) tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) tasks_scheduler.execute_all() @@ -656,6 +635,8 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): new_connection = optical_reply.connections[0] #for candidate_new_connection in pathcomp_reply.connections: str_candidate_new_connection = connection_to_string(new_connection) + LOGGER.info('QQQQ_old:{}'.format(str_old_connection)) + LOGGER.info('QQQQ_new:{}'.format(str_candidate_new_connection)) # Change UUID of new connection to prevent collisions tmp_connection = Connection() tmp_connection.CopyFrom(new_connection) diff --git a/src/service/service/service_handlers/oc/OCServiceHandler.py b/src/service/service/service_handlers/oc/OCServiceHandler.py index 8aad5b17a..127f29c75 100644 --- a/src/service/service/service_handlers/oc/OCServiceHandler.py +++ b/src/service/service/service_handlers/oc/OCServiceHandler.py @@ -167,6 +167,7 @@ class OCServiceHandler(_ServiceHandler): flows = convert_or_endpoints_to_flows(endpoints, bidir) else: flows = endpoints_to_flows(endpoints, bidir, is_opticalband) + LOGGER.info(f'RERF:{flows}') for device_uuid, dev_flows in flows.items(): try: diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index cb09e553f..c9eaff31c 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -430,7 +430,7 @@ class TasksScheduler: if len(service.service_config.config_rules) > 0: for constraint in service.service_constraints: if "type" in constraint.custom.constraint_type: - oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) + oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) if oc_type == 2 : reply,code = delete_lightpath( params['src'] @@ -444,8 +444,7 @@ class TasksScheduler: , params ['dst'] , 
params['bitrate'] , params['ob_id'] - , delete_band=not has_media_channel - , flow_id= params['flow_id'] + , flow_id=params['flow_id'] ) if code == 400 and reply_not_allowed in reply : MSG = 'Deleteion for the service is not Allowed , Served Lightpaths is not empty' @@ -609,10 +608,15 @@ class TasksScheduler: self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING )) + #old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + # self._executor, old_connection.connection_id, old_connection.service_id + #)) + old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( - self._executor, old_connection.connection_id, old_connection.service_id + self._executor, old_connection.connection_id, True )) + new_connection_configure_key = self._add_task_if_not_exists(Task_OpticalConnectionConfigure( self._executor, new_connection.connection_id )) @@ -739,7 +743,7 @@ class TasksScheduler: task = self._tasks.get(task_key) succeeded = True if dry_run else task.execute() results.append(succeeded) - LOGGER.debug('[execute_all] finished task {:s} ; succeeded={:s}'.format(str_task_name, str(succeeded))) + LOGGER.debug('[execute_allRRRR] finished task {:s} ; succeeded={:s}'.format(str_task_name, str(succeeded))) LOGGER.debug('[execute_all] results={:s}'.format(str(results))) return zip(ordered_task_keys, results) diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 4afa15f9c..4fb09718e 100644 --- a/src/service/service/tools/OpticalTools.py +++ b/src/service/service/tools/OpticalTools.py @@ -198,14 +198,14 @@ def get_optical_band(idx) -> str: return optical_band_uni_txt -def DelFlexLightpath( src, dst, bitrate, ob_id, delete_band, flow_id=None) -> str: - reply = "200" - delete_band = 1 if delete_band else 0 +def DelFlexLightpath( src, dst, bitrate, ob_id, flow_id=None) -> str: + reply = {} + code = 200 base_url = 
get_optical_controller_base_url() if not TESTING: if flow_id is not None: if ob_id is not None : - urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}/{}".format(base_url, src, dst, bitrate, flow_id,ob_id) + urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}/{}".format(base_url, src, dst, bitrate, flow_id, ob_id) else : #urlx = "http://{}:{}/OpticalTFS/DelOpticalBand/{}/{}/{}".format(OPTICAL_IP, OPTICAL_PORT, src, dst, ob_id) -- GitLab From 43c6e7a998d688e43bb313813e1921c8a6455a35 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Wed, 5 Nov 2025 21:41:50 +0100 Subject: [PATCH 07/41] BIDIR Bugs Fixed --- src/device/service/drivers/oc_driver/OCDriver.py | 2 +- .../service_handlers/oc/OCServiceHandler.py | 5 ++++- .../service/service_handlers/oc/OCTools.py | 15 +++++++++++---- .../service/task_scheduler/TaskExecutor.py | 1 + 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/device/service/drivers/oc_driver/OCDriver.py b/src/device/service/drivers/oc_driver/OCDriver.py index f7c093ba3..e1757b4ef 100644 --- a/src/device/service/drivers/oc_driver/OCDriver.py +++ b/src/device/service/drivers/oc_driver/OCDriver.py @@ -180,7 +180,7 @@ def edit_config( str_config_messages=delete_optical_band(resources) else : str_config_messages=disable_media_channel(resources) - + logger.info(f"messages ,{str_config_messages} ") for str_config_message in str_config_messages: # configuration of the received templates if str_config_message is None: raise UnsupportedResourceKeyException("CONFIG") diff --git a/src/service/service/service_handlers/oc/OCServiceHandler.py b/src/service/service/service_handlers/oc/OCServiceHandler.py index 127f29c75..27fc89563 100644 --- a/src/service/service/service_handlers/oc/OCServiceHandler.py +++ b/src/service/service/service_handlers/oc/OCServiceHandler.py @@ -98,7 +98,8 @@ class OCServiceHandler(_ServiceHandler): if not is_opticalband: LOGGER.info(f"ob-expanded bvalue is: {ob_expansion} and is_opticalband {is_opticalband}") return results - + 
LOGGER.info(f"is_opticalband {is_opticalband}") + LOGGER.info(f"set_Opticalconfig_endpoints is:{endpoints}") flows = endpoints_to_flows(endpoints, bidir, is_opticalband) #new cycle for setting optical devices @@ -166,6 +167,8 @@ class OCServiceHandler(_ServiceHandler): if is_openroadm: flows = convert_or_endpoints_to_flows(endpoints, bidir) else: + LOGGER.info(f"is_opticalband {is_opticalband}") + LOGGER.info(f'RERF endpoints :{endpoints}') flows = endpoints_to_flows(endpoints, bidir, is_opticalband) LOGGER.info(f'RERF:{flows}') diff --git a/src/service/service/service_handlers/oc/OCTools.py b/src/service/service/service_handlers/oc/OCTools.py index 14cd7cbed..e3fcac3c4 100644 --- a/src/service/service/service_handlers/oc/OCTools.py +++ b/src/service/service/service_handlers/oc/OCTools.py @@ -302,8 +302,15 @@ def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): #if bidir reading 4 endpoints per node if bidir: log.info(f"i starts with {i} ") - i = i + 1 - while(i < end-2): + device0 , endpoint0=endpoints[0][0:2] + device1 , endpoint1=endpoints[1][0:2] + finalend=end-2 + if device0 ==device1: + i = i + 1 + + else : + finalend=end-1 + while(i < finalend): #i endpoint = endpoints[i] device_uuid, endpoint_uuid = endpoint[0:2] @@ -317,7 +324,7 @@ def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): entry_tuple = endpoint_uuid, next_endpoint_uuid entries[device_uuid].append(entry_tuple) else: - + log.info(f"error : next_dev {next_device_uuid} dev {device_uuid} for i {i} ") return {} #i+2 @@ -333,7 +340,7 @@ def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): entries[device_uuid].append(entry_tuple) i = i + 4 else: - + log.info(f"error : next_2_dev {next_2_device_uuid} next_3_device{next_3_device_uuid} dev {device_uuid} for i {i} ") return {} else: while(i < end-1): diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index 
ff97fd931..2855a9170 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -148,6 +148,7 @@ class TaskExecutor: self, device : Device, settings : str, flows : list, is_opticalband : bool, connection_uuid:str ): + LOGGER.info(f"service optical config {settings}") device_key = get_device_key(device.device_id) optical_config_id = OpticalConfigId() optical_config_id.opticalconfig_uuid = opticalconfig_get_uuid(device.device_id) -- GitLab From 421eda139fcc23f8f4000b60c752b677a9a2ea46 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 7 Nov 2025 18:04:53 +0000 Subject: [PATCH 08/41] Refactor imports and enhance gNMI collector functionality - Updated import paths for _Collector in INTCollector.py. - Added missing scapy dependency in requirements.in. - Changed return types to Optional in HelperMethods.py. - Improved disconnect handling in GnmiOpenConfigCollector.py. - Added TOTAL_POWER KPI in KPI.py. - Enhanced PathMapper.py for wavelength router support. - Refined SubscriptionNew.py with graceful stop and response parsing. - Updated test cases in messages.py and test_unit_GnmiOpenConfigCollector.py for better logging and parameter handling. 
--- .../collectors/intcollector/INTCollector.py | 2 +- src/telemetry/backend/requirements.in | 1 + .../backend/service/HelperMethods.py | 7 +- .../gnmi_oc/GnmiOpenConfigCollector.py | 44 +++++-- .../backend/service/collectors/gnmi_oc/KPI.py | 1 + .../service/collectors/gnmi_oc/PathMapper.py | 26 +++- .../collectors/gnmi_oc/SubscriptionNew.py | 121 +++++++++++++++--- .../backend/tests/gnmi_oc/messages.py | 71 ++++++---- .../test_unit_GnmiOpenConfigCollector.py | 13 +- 9 files changed, 220 insertions(+), 66 deletions(-) diff --git a/src/telemetry/backend/collectors/intcollector/INTCollector.py b/src/telemetry/backend/collectors/intcollector/INTCollector.py index 9d89827f4..5931f33f3 100644 --- a/src/telemetry/backend/collectors/intcollector/INTCollector.py +++ b/src/telemetry/backend/collectors/intcollector/INTCollector.py @@ -20,7 +20,7 @@ from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.executors.pool import ThreadPoolExecutor from datetime import datetime -from telemetry.backend.collector_api._Collector import _Collector +from telemetry.backend.service.collector_api._Collector import _Collector from scapy.all import * import struct diff --git a/src/telemetry/backend/requirements.in b/src/telemetry/backend/requirements.in index 3b1fd8b35..51ea6b906 100644 --- a/src/telemetry/backend/requirements.in +++ b/src/telemetry/backend/requirements.in @@ -20,3 +20,4 @@ kafka-python==2.0.6 numpy==2.0.1 pygnmi==0.8.14 pytz>=2025.2 +scapy==2.6.1 # TODO: UBI need to confirm the version (This depencdency was missing) diff --git a/src/telemetry/backend/service/HelperMethods.py b/src/telemetry/backend/service/HelperMethods.py index db56c9a76..0afd712c1 100644 --- a/src/telemetry/backend/service/HelperMethods.py +++ b/src/telemetry/backend/service/HelperMethods.py @@ -14,6 +14,7 @@ import uuid import logging +from typing import Optional from .collector_api._Collector import _Collector from 
.collector_api.DriverInstanceCache import get_driver from common.proto.kpi_manager_pb2 import KpiId @@ -24,7 +25,7 @@ LOGGER = logging.getLogger(__name__) def get_subscription_parameters( kpi_id : str, kpi_manager_client, context_client, duration, interval - ) -> list[tuple] | None: + ) -> Optional[list[tuple]]: """ Method to get subscription parameters based on KPI ID. Returns a list of tuples with subscription parameters. @@ -95,12 +96,12 @@ def get_subscription_parameters( def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, driver_instance_cache - ) -> _Collector | None: + ) -> Optional[_Collector]: """ Method to get a collector instance based on KPI ID. Preconditions: - A KPI Descriptor must be added in KPI DB with correct device_id. - - The device must be available in the context. + - The device must be available in the context DB. Returns: - Collector instance if found, otherwise None. Raises: diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py index 44b169135..f84ba993f 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py @@ -75,12 +75,31 @@ class GNMIOpenConfigCollector(_Collector): def Disconnect(self) -> bool: """ Disconnect from the gNMI target device. + Stops all active subscriptions before closing the connection. 
""" + # Stop all active subscriptions first + if self._subscriptions: + self.logger.info("Stopping %d active subscription(s) before disconnect...", + len(self._subscriptions)) + # Create a list of subscription IDs to avoid dictionary size change during iteration + sub_ids = list(self._subscriptions.keys()) + for sub_id in sub_ids: + try: + self.UnsubscribeState(sub_id) + except Exception as exc: + self.logger.warning("Error stopping subscription %s during disconnect: %s", + sub_id, exc) + if self.connected and self.client: - self.client.close() - self.connected = False - self.logger.info("Disconnected from gNMI target %s:%s", self.address, self.port) - return True + try: + self.client.close() + self.connected = False + self.logger.info("Disconnected from gNMI target %s:%s", self.address, self.port) + return True + except Exception as exc: + self.logger.error("Error during disconnect: %s", exc) + self.connected = False # Mark as disconnected even if close fails + return False else: self.logger.warning("Not connected to any gNMI target.") return True @@ -129,18 +148,19 @@ class GNMIOpenConfigCollector(_Collector): return response def UnsubscribeState(self, resource_key: str) -> bool: - """Stop the given subscription.""" + """Stop the given subscription gracefully.""" sub = self._subscriptions.pop(resource_key, None) if not sub: - self.logger.error("Attempt to unsubscribe unknown id=%s", resource_key) - # raise KeyError(f"Unknown subscription id '{resource_key}'.") + self.logger.warning("Attempt to unsubscribe unknown id=%s", resource_key) return False - try: sub.stop() - except: - self.logger.exception("Error stopping subscription %s. 
", resource_key) + + try: + sub.stop() + self.logger.info("Unsubscribed from state: %s", resource_key) + return True + except Exception as exc: + self.logger.error("Error stopping subscription %s: %s", resource_key, exc) return False - self.logger.info("Unsubscribed from state: %s", resource_key) - return True def GetState(self, duration : float, blocking : bool = True, terminate: Optional[queue.Queue] = None ) -> Iterator[Tuple[float, str, Any]]: diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py index 7281c8a2e..9ac80dff0 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py @@ -25,4 +25,5 @@ class KPI(IntEnum): # TODO: verify KPI names and codes with KPI proto fi BYTES_RECEIVED = 202 BYTES_DROPPED = 203 INBAND_POWER = 301 + TOTAL_POWER = 302 # TODO: Add more KPIs as needed, diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py index b02ca5598..a2c65c861 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py @@ -67,6 +67,13 @@ class PathMapper: KPI.INBAND_POWER: [ "inband-power", "inband-power-state" ], + + # ---- total power (optical wavelength router) ---------------- + # For optical devices using flex-scale-mg-on YANG model + # Path format: optical-power-total-input/instant or optical-power-total-output/instant + KPI.TOTAL_POWER: [ + "optical-power-total-input/instant", + ], } # --------------------------------------------------------------# @@ -78,6 +85,12 @@ class PathMapper: 'interfaces/interface[name={endpoint}]/state/counters/{leaf}', # 'interfaces/interface[name="{endpoint}"]/state/{leaf}', ] + + # Wavelength router prefixes (for optical devices) + # Uses oc-wave-router and fsmgon module prefixes to avoid origin extraction issues 
+ _WAVELENGTH_ROUTER_PREFIXES = [ + 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index={endpoint}]/state/{leaf}', + ] # --------------------------------------------------------------# # Public helper # # --------------------------------------------------------------# @@ -88,9 +101,9 @@ class PathMapper: """ Return **a list** of path strings. - :param endpoint: Interface name, e.g. 'Ethernet0' + :param endpoint: Interface name (e.g. 'Ethernet0') or optical band index (e.g. '1') :param kpi: KPI enum - :param resource: Interface parameter + :param resource: Resource type: 'interface' or 'wavelength-router' """ try: kpi_enum = KPI(kpi) @@ -104,11 +117,16 @@ class PathMapper: paths: List[str] = [] for leaf in leaves: if resource == "interface": + # Use standard interface prefixes for prefix in cls._PREFIXES: paths.append(prefix.format(endpoint=endpoint, leaf=leaf)) + elif resource == "wavelength-router": + # Use wavelength router prefixes with module prefixes to avoid origin extraction + for prefix in cls._WAVELENGTH_ROUTER_PREFIXES: + paths.append(prefix.format(endpoint=endpoint, leaf=leaf)) else: raise ValueError(f"Unsupported resource: {resource}") - logger.debug("Built %d candidate path(s) for %s on %s", - len(paths), kpi_enum.name, endpoint) + logger.debug("Built %d candidate path(s) for %s on %s (resource=%s)", + len(paths), kpi_enum.name, endpoint, resource) return paths diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py index e34b2e472..789f5ec1c 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py @@ -13,11 +13,11 @@ # limitations under the License. 
-from google.protobuf.json_format import MessageToDict from pygnmi.client import gNMIclient # type: ignore from queue import Queue -from typing import Callable, Tuple, Optional, List +from typing import Callable, Tuple, Optional, List, Any import grpc +import json import logging import threading @@ -39,10 +39,10 @@ class Subscription: gnmi_client: gNMIclient, path_list: List[str], metric_queue: Queue, - mode: str = "stream", - sample_interval_ns: int = 10_000_000_000, - heartbeat_interval_ns: int | None = None, # ← NEW - encoding: str = "json_ietf", + mode: str = "stream", + sample_interval_ns: int = 10_000_000_000, + heartbeat_interval_ns: Optional[int] = None, + encoding: str = "json_ietf", on_update: Optional[Callable[[dict], None]] = None, ) -> None: @@ -70,10 +70,83 @@ class Subscription: return self._queue.get(timeout=timeout) def stop(self) -> None: + """Gracefully stop the subscription thread.""" + if not self._thread.is_alive(): + logger.debug("Subscription %s thread already stopped", self.sub_id) + return + + logger.debug("Stopping subscription %s...", self.sub_id) self._stop_event.set() - self._thread.join(2) - logger.info("Stopped subscription %s", self.sub_id) + self._thread.join(timeout=3) + + if self._thread.is_alive(): + logger.warning("Subscription %s thread did not stop within timeout", self.sub_id) + else: + logger.info("Stopped subscription %s", self.sub_id) + # --------------------------------------------------------------# + # Internal loop # + # --------------------------------------------------------------# + def _parse_subscribe_response(self, stream_msg) -> dict: + """ + Parse gNMI SubscribeResponse protobuf message. + Mimics pygnmi's telemetryParser but simplified for our needs. + Properly decodes json_ietf_val by directly accessing protobuf bytes. 
+ """ + response = {} + + if stream_msg.HasField("update"): + response["update"] = { + "timestamp": stream_msg.update.timestamp if stream_msg.update.timestamp else 0, + "update": [] + } + + # Process updates + for update_msg in stream_msg.update.update: + update_container = { + "path": self._gnmi_path_to_string(update_msg.path) if update_msg.path else None + } + + # Decode the value - THIS IS THE KEY PART + if update_msg.HasField("val"): + if update_msg.val.HasField("json_ietf_val"): + # Access raw bytes and decode directly (like pygnmi does) + decoded_val = json.loads(update_msg.val.json_ietf_val) + update_container["val"] = decoded_val + elif update_msg.val.HasField("json_val"): + update_container["val"] = json.loads(update_msg.val.json_val) + elif update_msg.val.HasField("string_val"): + update_container["val"] = update_msg.val.string_val + elif update_msg.val.HasField("int_val"): + update_container["val"] = update_msg.val.int_val + elif update_msg.val.HasField("uint_val"): + update_container["val"] = update_msg.val.uint_val + elif update_msg.val.HasField("bool_val"): + update_container["val"] = update_msg.val.bool_val + elif update_msg.val.HasField("float_val"): + update_container["val"] = update_msg.val.float_val + else: + update_container["val"] = None + + response["update"]["update"].append(update_container) + + elif stream_msg.HasField("sync_response"): + response["sync_response"] = stream_msg.sync_response + + return response + + def _gnmi_path_to_string(self, path_msg) -> str: + """Convert gNMI Path protobuf to string representation.""" + path_parts = [] + for elem in path_msg.elem: + part = elem.name + if elem.key: + # Add keys in sorted order for consistency + for key_name, key_val in sorted(elem.key.items()): + part += f"[{key_name}={key_val}]" + path_parts.append(part) + return "/".join(path_parts) + # --------------------------------------------------------------# # Internal loop # # --------------------------------------------------------------# @@ 
-82,13 +155,12 @@ class Subscription: path_list: List[str], mode: str, sample_interval_ns: int, - heartbeat_interval_ns: int | None, + heartbeat_interval_ns: Optional[int], encoding: str, on_update: Optional[Callable[[dict], None]], ) -> None: # pragma: no cover """ Try each candidate path until the Subscribe RPC succeeds. - * Top level mode: STREAM / ONCE / POLL (here we always stream) * Per entry mode: SAMPLE / ON_CHANGE """ @@ -103,7 +175,7 @@ class Subscription: entry: dict = {"path": path} if entry_mode == "sample": - entry["mode"] = "sample" + entry["mode"] = "sample" entry["sample_interval"] = sample_interval_ns elif entry_mode == "on_change": entry["mode"] = "on_change" @@ -121,19 +193,23 @@ class Subscription: try: logger.debug("Sub %s attempting path %s", self.sub_id, path) for stream in self.gnmi_client.subscribe(request): - msg_dict = MessageToDict(stream) - # logger.debug("Stream: %s", msg_dict) + # Check if stop was requested + if self._stop_event.is_set(): + logger.debug("Sub %s stop requested, breaking stream loop", self.sub_id) + break + + # Parse the protobuf message directly (like pygnmi does) + msg_dict = self._parse_subscribe_response(stream) # Process any update data - if msg_dict.get('update'): # 'update' in msg_dict: + if msg_dict.get('update'): logger.debug("Sub %s got update data", self.sub_id) if on_update: on_update(msg_dict) else: self._queue.put(msg_dict) - # logger.debug("The update added in queue → %s", msg_dict) # Put a dummy update if syncResponse is received to prevent timeout - elif msg_dict.get('syncResponse'): # 'syncResponse' in msg_dict: + elif msg_dict.get('sync_response'): logger.debug("Sub %s received sync response", self.sub_id) # Optional: put a notification about the sync if not on_update: @@ -142,13 +218,18 @@ class Subscription: logger.warning("Sub %s received unknown message: %s", self.sub_id, msg_dict) except grpc.RpcError as err: - if err.code() == grpc.StatusCode.INVALID_ARGUMENT: + # Handle graceful shutdown 
(channel closed) + if err.code() == grpc.StatusCode.CANCELLED: + logger.debug("Sub %s cancelled (channel closed) - graceful shutdown", self.sub_id) + break + elif err.code() == grpc.StatusCode.INVALID_ARGUMENT: logger.warning("Path '%s' rejected (%s) -- trying next", path, err.details()) continue - logger.exception("Subscription %s hit gRPC error: %s", - self.sub_id, err) - break + else: + logger.exception("Subscription %s hit gRPC error: %s", + self.sub_id, err) + break except Exception as exc: # pylint: disable=broad-except logger.exception("Subscription %s failed: %s", self.sub_id, exc) diff --git a/src/telemetry/backend/tests/gnmi_oc/messages.py b/src/telemetry/backend/tests/gnmi_oc/messages.py index d68d2dde3..4722e4bfb 100644 --- a/src/telemetry/backend/tests/gnmi_oc/messages.py +++ b/src/telemetry/backend/tests/gnmi_oc/messages.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional import uuid from common.proto import kpi_manager_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType @@ -20,48 +21,68 @@ from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI # Test device connection parameters devices = { 'device1': { - 'host': '10.1.1.86', - 'port': '6030', + 'host' : '10.1.1.86', + 'port' : '6030', 'username': 'ocnos', 'password': 'ocnos', 'insecure': True, + 'kpi' : KPI.PACKETS_RECEIVED, + 'resource': 'interface', + 'endpoint': 'Management0', }, 'device2': { - 'host': '10.1.1.87', - 'port': '6030', + 'host' : '10.1.1.87', + 'port' : '6030', 'username': 'ocnos', 'password': 'ocnos', 'insecure': True, + 'kpi' : KPI.PACKETS_RECEIVED, + 'resource': 'interface', + 'endpoint': 'Management0', }, 'device3': { - 'host': '172.20.20.101', - 'port': '6030', + 'host' : '172.20.20.101', + 'port' : '6030', 'username': 'admin', 'password': 'admin', 'insecure': True, + 'kpi' : KPI.PACKETS_RECEIVED, + 'resource': 'interface', + 'endpoint': 'Management0', 
+ }, + 'mgon': { + 'host' : 'localhost', + 'port' : '50061', + 'username': 'admin', + 'password': 'admin', + 'insecure': True, + 'kpi' : KPI.TOTAL_POWER, + 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model + 'endpoint': '1', }, } -def creat_basic_sub_request_parameters( - resource: str = 'interface', - endpoint: str = 'Management0', # 'Ethernet1', - kpi: KPI = KPI.PACKETS_RECEIVED, # It should be KPI Id not name? Need to be replaced with KPI id. -) -> dict: +def creat_basic_sub_request_parameters() -> dict: - device = devices['device3'] - return { - 'target' : (device['host'], device['port']), - 'username' : device['username'], - 'password' : device['password'], - 'connect_timeout' : 15, - 'insecure' : device['insecure'], - 'mode' : 'on_change', # Subscription internal mode posibly: on_change, poll, sample - 'sample_interval_ns': '3s', - 'sample_interval' : '10s', - 'kpi' : kpi, - 'resource' : resource, - 'endpoint' : endpoint, - } + device = devices['device3'] + if device: + kpi = device['kpi'] + resource = device['resource'] + endpoint = device['endpoint'] + return { + 'target' : (device['host'], device['port']), + 'username' : device['username'], + 'password' : device['password'], + 'connect_timeout' : 15, + 'insecure' : device['insecure'], + 'mode' : 'sample', # Subscription internal mode posibly: on_change, poll, sample + 'sample_interval_ns': '3s', + 'sample_interval' : '10s', + 'kpi' : kpi, + 'resource' : resource, + 'endpoint' : endpoint, + } + return {} def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): _create_kpi_request = kpi_manager_pb2.KpiDescriptor() diff --git a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py index 127098d26..06f7632d4 100644 --- a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py @@ 
-17,6 +17,7 @@ import time import pytest from telemetry.backend.service.collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector from .messages import creat_basic_sub_request_parameters +from ..Fixtures import kpi_manager_client, context_client logging.basicConfig( level=logging.DEBUG, @@ -25,6 +26,16 @@ logging.basicConfig( logger = logging.getLogger(__name__) +@pytest.fixture(autouse=True) +def log_all_methods(request): + ''' + This fixture logs messages before and after each test function runs, indicating the start and end of the test. + The autouse=True parameter ensures that this logging happens automatically for all tests in the module. + ''' + logger.info(f" >>>>> Starting test: {request.node.name} ") + yield + logger.info(f" <<<<< Finished test: {request.node.name} ") + @pytest.fixture def sub_parameters(): """Fixture to provide subscription parameters.""" @@ -98,7 +109,7 @@ def test_get_state_updates(collector, subscription_data): assert len(updates_received) > 0 -def test_unsubscribe_state(collector, subscription_data): +def test_unsubscribe_state(collector, subscription_data, kpi_manager_client, context_client): """Test unsubscribing from state.""" logger.info("----- Testing Unsubscribe -----") collector.SubscribeState(subscription_data) -- GitLab From e40fcec6e5720b17d88e56e13061f4d4b9eac9a6 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 10 Nov 2025 14:47:55 +0000 Subject: [PATCH 09/41] Enhance gNMI subscription handling and update test logging for dynamic durations --- .../service/collectors/gnmi_oc/SubscriptionNew.py | 15 ++++++++++++++- .../gnmi_oc/test_unit_GnmiOpenConfigCollector.py | 12 ++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py index 789f5ec1c..69c895a74 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py +++ 
b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py @@ -112,9 +112,22 @@ class Subscription: if update_msg.val.HasField("json_ietf_val"): # Access raw bytes and decode directly (like pygnmi does) decoded_val = json.loads(update_msg.val.json_ietf_val) + # Try to convert numeric strings to float for proper formatting + if isinstance(decoded_val, str): + try: + decoded_val = float(decoded_val) + except (ValueError, TypeError): + pass # Keep as string if not numeric update_container["val"] = decoded_val elif update_msg.val.HasField("json_val"): - update_container["val"] = json.loads(update_msg.val.json_val) + decoded_val = json.loads(update_msg.val.json_val) + # Try to convert numeric strings to float + if isinstance(decoded_val, str): + try: + decoded_val = float(decoded_val) + except (ValueError, TypeError): + pass + update_container["val"] = decoded_val elif update_msg.val.HasField("string_val"): update_container["val"] = update_msg.val.string_val elif update_msg.val.HasField("int_val"): diff --git a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py index 06f7632d4..ccdb6d38b 100644 --- a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py @@ -128,17 +128,21 @@ def test_full_workflow(collector, subscription_data): response1 = collector.SubscribeState(subscription_data) logger.info("Subscription started: %s", subscription_data) assert all(response1) and isinstance(response1, list) + + _, _, duration_received, interval_received = subscription_data[0] # Get updates - logger.info("Requesting state updates for 5 seconds ...") + logger.info(f"Requesting state updates for {duration_received} seconds after every {interval_received} seconds ...") updates_received = [] - for samples in collector.GetState(duration=5.0, blocking=True): + for samples in 
collector.GetState(duration=duration_received, blocking=True): logger.info("Received state update: %s", samples) updates_received.append(samples) assert len(updates_received) > 0 # Wait for additional updates - logger.info("Waiting for updates for 5 seconds...") - time.sleep(5) + logger.info(f"Waiting for updates after every {interval_received} seconds...") + + # put a sleep to simulate waiting for more updates + time.sleep(15) # Unsubscribe response2 = collector.UnsubscribeState("x123") -- GitLab From a1260aacf3cf50342ee92792c020d8c146964e45 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 10 Nov 2025 15:21:28 +0000 Subject: [PATCH 10/41] Add Optical Power Total Input KPI to proto and update corresponding Python enum --- proto/kpi_sample_types.proto | 1 + src/telemetry/backend/service/collectors/gnmi_oc/KPI.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto index 5fcda6df9..19cd59f15 100644 --- a/proto/kpi_sample_types.proto +++ b/proto/kpi_sample_types.proto @@ -31,6 +31,7 @@ enum KpiSampleType { KPISAMPLETYPE_ML_CONFIDENCE = 401; //. can be used by both optical and L3 without any issue KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. 
can be used by both optical and L3 without any issue + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602; diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py index 9ac80dff0..4a57ab8dc 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py @@ -25,5 +25,5 @@ class KPI(IntEnum): # TODO: verify KPI names and codes with KPI proto fi BYTES_RECEIVED = 202 BYTES_DROPPED = 203 INBAND_POWER = 301 - TOTAL_POWER = 302 + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 302 # TODO: Add more KPIs as needed, -- GitLab From 9ad39336bbff1cdbc1c979c9c1c1c6356dd8daf8 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 10 Nov 2025 16:22:43 +0100 Subject: [PATCH 11/41] temo mon integration --- ofc26.sh | 229 ++++++++++++++++++++++++++++++ scripts/run_mon_test.sh | 28 ++++ src/service/service/monitoring.py | 148 +++++++++++++++++++ 3 files changed, 405 insertions(+) create mode 100644 ofc26.sh create mode 100755 scripts/run_mon_test.sh create mode 100644 src/service/service/monitoring.py diff --git a/ofc26.sh b/ofc26.sh new file mode 100644 index 000000000..e6926b64e --- /dev/null +++ b/ofc26.sh @@ -0,0 +1,229 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate OSM Client +#export TFS_COMPONENTS="${TFS_COMPONENTS} osm_client" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +#export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. 
+export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. 
+export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Telemetry Config ------------------------------------------------------ + +# Define a Load Balancer IP for Telemetry Collector components +export LOAD_BALANCER_IP="192.168.5.250" # <-- Change this to match your network diff --git a/scripts/run_mon_test.sh b/scripts/run_mon_test.sh new file mode 100755 index 000000000..874e6bcda --- /dev/null +++ b/scripts/run_mon_test.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +#RCFILE=$PROJECTDIR/coverage/.coveragerc + +export KFK_SERVER_ADDRESS='127.0.0.1:9092' + +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" + +python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ + service/service/monitoring.py diff --git a/src/service/service/monitoring.py b/src/service/service/monitoring.py new file mode 100644 index 000000000..dad72f00a --- /dev/null +++ b/src/service/service/monitoring.py @@ -0,0 +1,148 @@ +import uuid +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from kpi_manager.client.KpiManagerClient import KpiManagerClient +import logging +import pytest +from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList + +import uuid +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI + +from telemetry.backend.service.collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def kpi_manager_client(): + LOGGER.info('Starting KpiManagerClient...') + _client = KpiManagerClient(host="10.152.183.91") + _client.connect() + LOGGER.info('Yielding Connected KpiManagerClient...') + yield _client + LOGGER.info('Closed KpiManagerClient...') + _client.close() + + + +def create_kpi_descriptor_request(descriptor_name: str = "optical_monitoring"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + #_create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" + # _create_kpi_request.kpi_id.kpi_id.uuid = 
"f974b6cc-095f-4767-b8c1-3457b383fb99" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT + #_create_kpi_request.device_id.device_uuid.uuid = str(uuid.uuid4()) + _create_kpi_request.device_id.device_uuid.uuid = "5dc3f5d7-d3a9-5057-a9a0-8af943a5461c" + _create_kpi_request.service_id.service_uuid.uuid = 'SERV2' + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + #_create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "decb9c95-7298-5ec8-a4b6-7f276f595106" + _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return _create_kpi_request + + +''' +def test_SetKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") + response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) +''' + +''' +def test_GetKpiDescriptor(kpi_manager_client): + LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") + # adding KPI + response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) + # get KPI + response = kpi_manager_client.GetKpiDescriptor(response_id) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiDescriptor) +''' + +# Test device connection parameters +devices = { + 'device1': { + 'host': '172.17.254.22', + 'port': '50061', + 'username': 'admin', + 'password': 'admin', + 'insecure': True, + } +} + +def create_basic_sub_request_parameters( + resource: str = 'components', + endpoint: str = 'port-1-in', # 'Ethernet1', + kpi: KPI = KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT, # It should be KPI Id not name? Need to be replaced with KPI id. 
+) -> dict: + + device = devices['device1'] + return { + 'target' : (device['host'], device['port']), + 'username' : device['username'], + 'password' : device['password'], + 'connect_timeout' : 15, + 'insecure' : device['insecure'], + 'mode' : 'sample', # Subscription internal mode posibly: on_change, poll, sample + 'sample_interval_ns': '3s', + 'sample_interval' : '10s', + 'kpi' : kpi, + 'resource' : resource, + 'endpoint' : endpoint, + } + + +@pytest.fixture +def sub_parameters(): + """Fixture to provide subscription parameters.""" + return create_basic_sub_request_parameters() + + +@pytest.fixture +def collector(sub_parameters): + """Fixture to create and connect GNMI collector.""" + collector = GNMIOpenConfigCollector( + username = sub_parameters['username'], + password = sub_parameters['password'], + insecure = sub_parameters['insecure'], + address = sub_parameters['target'][0], + port = sub_parameters['target'][1], + ) + collector.Connect() + yield collector + collector.Disconnect() + + +@pytest.fixture +def subscription_data(sub_parameters): + """Fixture to provide subscription data.""" + # It should return a list of tuples with subscription parameters. 
+ return [ + ( + "sub_id_123", + { + "kpi" : sub_parameters['kpi'], + "endpoint" : sub_parameters['endpoint'], + "resource" : sub_parameters['resource'], + }, + float(10.0), + float(5.0), + ), + ] + + +def test_collector_connection(collector): + """Test collector connection.""" + LOGGER.info("----- Testing GNMI OpenConfig Collector Connection -----") + assert collector.connected is True + LOGGER.debug("Collector connected: %s", collector.connected) + + -- GitLab From fb90ccbdf3df48e023c585b5f2443664adeadc76 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 29 Jan 2026 13:08:46 +0000 Subject: [PATCH 12/41] Refactor Telemetry Collector to handle MGON Streaming - KPI handling and logging in gNMI collector - Update in SubscriptionNew.py - KPI type added in test --- .../backend/service/HelperMethods.py | 11 ++- .../gnmi_oc/GnmiOpenConfigCollector.py | 17 ++-- .../service/collectors/gnmi_oc/PathMapper.py | 7 +- .../collectors/gnmi_oc/SubscriptionNew.py | 81 +++++++++++++------ .../backend/tests/gnmi_oc/messages.py | 4 +- 5 files changed, 75 insertions(+), 45 deletions(-) diff --git a/src/telemetry/backend/service/HelperMethods.py b/src/telemetry/backend/service/HelperMethods.py index 0afd712c1..19107be6b 100644 --- a/src/telemetry/backend/service/HelperMethods.py +++ b/src/telemetry/backend/service/HelperMethods.py @@ -58,7 +58,7 @@ def get_subscription_parameters( include_components = False ) if not device: - raise Exception(f"KPI ID: {kpi_id} - Device not found for KPI descriptor.") + raise Exception(f"KPI ID: {kpi_id} - Device not found for KPI descriptor.") #TODO: Change to TFS NotFoundException endpoints = device.device_endpoints # LOGGER.info(f"Device for KPI ID: {kpi_id} - {endpoints}") @@ -103,9 +103,8 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri - A KPI Descriptor must be added in KPI DB with correct device_id. - The device must be available in the context DB. Returns: - - Collector instance if found, otherwise None. 
- Raises: - - Exception if the KPI ID is not found or the collector cannot be created. + - Collector instance if found, otherwise raises exception + if the KPI ID is not found or the collector cannot be created. """ LOGGER.info(f"Getting collector for KPI ID: {kpi_id}") kpi_id_obj = KpiId() @@ -113,7 +112,7 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) # LOGGER.info(f"KPI Descriptor: {kpi_descriptor}") if not kpi_descriptor: - raise Exception(f"KPI ID: {kpi_id} - Descriptor not found.") + raise Exception(f"KPI ID: {kpi_id} - Descriptor not found.") #TODO: Change to TFS NotFoundException # device_uuid = kpi_descriptor.device_id.device_uuid.uuid device = get_device( context_client = context_client, @@ -125,6 +124,6 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri # Getting device collector (testing) collector : _Collector = get_driver(driver_instance_cache, device) if collector is None: - raise Exception(f"KPI ID: {kpi_id} - Collector not found for device {device.device_uuid.uuid}.") + raise Exception(f"KPI ID: {kpi_id} - Collector not found for device {device.device_uuid.uuid}.") #TODO: Change to TFS NotFoundException # LOGGER.info(f"Collector for KPI ID: {kpi_id} - {collector.__class__.__name__}") return collector diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py index f84ba993f..602d2f8d9 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py @@ -19,7 +19,7 @@ from typing import Dict, Optional, Tuple, List, Union, Any, Iterator from pygnmi.client import gNMIclient from telemetry.backend.service.collector_api._Collector import _Collector from .PathMapper import PathMapper -from .SubscriptionNew import 
Subscription +from .SubscriptionNew import LOGGER, Subscription logging.basicConfig( level=logging.DEBUG, @@ -124,22 +124,21 @@ class GNMIOpenConfigCollector(_Collector): raise KeyError("Endpoint dictionary must contain 'resource' key.") paths = PathMapper.build( - endpoint=sub_endpoint['endpoint'], - kpi=sub_endpoint['kpi'], - resource=sub_endpoint['resource'], + endpoint = sub_endpoint['endpoint'], + kpi = sub_endpoint['kpi' ], + resource = sub_endpoint['resource'], ) - + LOGGER.debug("Built %d candidate path(s) for endpoint '%s'", len(paths), sub_endpoint['endpoint']) self._subscriptions[sub_id] = Subscription( sub_id = sub_id, gnmi_client = self.client, # type: ignore path_list = paths, # <- list of paths metric_queue = self._output_queue, - mode = 'stream', # Default mode + mode = 'sample', # Entry mode: sample/on_change/target_defined sample_interval_ns = int(interval * 1_000_000_000), # Convert seconds to nanoseconds - heartbeat_interval_ns = int(duration * 1_000_000_000), # Convert seconds to nanoseconds - encoding = 'json_ietf', # Default encoding + total_duration = duration, + encoding = 'json', # Use 'json' encoding (not 'json_ietf') ) - self.logger.info("Subscribing to %s with job_id %s ...", sub_endpoint, sub_id) response.append(True) except: diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py index a2c65c861..dc467d1f9 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py @@ -69,10 +69,9 @@ class PathMapper: ], # ---- total power (optical wavelength router) ---------------- - # For optical devices using flex-scale-mg-on YANG model - # Path format: optical-power-total-input/instant or optical-power-total-output/instant - KPI.TOTAL_POWER: [ - "optical-power-total-input/instant", + # For optical devices using FlexScale MGON YANG model + 
KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT : [ + "optical-power-total-input/instant", ], } diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py index 69c895a74..8d20aca0b 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py @@ -13,6 +13,7 @@ # limitations under the License. +import time from pygnmi.client import gNMIclient # type: ignore from queue import Queue from typing import Callable, Tuple, Optional, List, Any @@ -21,8 +22,8 @@ import json import logging import threading -logger = logging.getLogger(__name__) -# logger.setLevel(logging.INFO) +LOGGER = logging.getLogger(__name__) +# LOGGER.setLevel(logging.INFO) class Subscription: @@ -39,10 +40,11 @@ class Subscription: gnmi_client: gNMIclient, path_list: List[str], metric_queue: Queue, - mode: str = "stream", - sample_interval_ns: int = 10_000_000_000, - heartbeat_interval_ns: Optional[int] = None, - encoding: str = "json_ietf", + mode: str = "stream", + sample_interval_ns: int = 10_000_000_000, + heartbeat_interval_ns: Optional[int] = 10_000_000_000, + total_duration: Optional[float] = 60.0, # in seconds + encoding: str = "json_ietf", on_update: Optional[Callable[[dict], None]] = None, ) -> None: @@ -54,15 +56,30 @@ class Subscription: self._thread = threading.Thread( target = self._run, args = ( - path_list, mode, - sample_interval_ns, heartbeat_interval_ns, encoding, on_update, + path_list, mode, sample_interval_ns, + heartbeat_interval_ns, encoding, on_update, ), name=f"gnmi-sub-{sub_id[:8]}", daemon=True, ) + # Start the subscription thread self._thread.start() - logger.info("Started subscription %s",sub_id) + + # Stop the subscription after the given duration + if total_duration and total_duration > 0: + def stop_after_duration(): + time.sleep(total_duration) + LOGGER.warning(f"Execution duration 
({total_duration}s) completed for Subscription: {sub_id}") + self.stop() + + duration_thread = threading.Thread( + target=stop_after_duration, daemon=True, name=f"stop_after_duration_{sub_id[:8]}" + ) + duration_thread.start() + else: + LOGGER.debug("Subscription %s has no total duration limit", sub_id) + LOGGER.info("Started subscription %s",sub_id) # --------------------------------------------------------------# # Public helpers # # --------------------------------------------------------------# @@ -72,17 +89,17 @@ class Subscription: def stop(self) -> None: """Gracefully stop the subscription thread.""" if not self._thread.is_alive(): - logger.debug("Subscription %s thread already stopped", self.sub_id) + LOGGER.debug("Subscription %s thread already stopped", self.sub_id) return - logger.debug("Stopping subscription %s...", self.sub_id) + LOGGER.debug("Stopping subscription %s...", self.sub_id) self._stop_event.set() self._thread.join(timeout=3) if self._thread.is_alive(): - logger.warning("Subscription %s thread did not stop within timeout", self.sub_id) + LOGGER.warning("Subscription %s thread did not stop within timeout", self.sub_id) else: - logger.info("Stopped subscription %s", self.sub_id) + LOGGER.info("Stopped subscription %s", self.sub_id) # --------------------------------------------------------------# # Internal loop # @@ -186,6 +203,7 @@ class Subscription: break entry: dict = {"path": path} + LOGGER.debug("Subscription %s preparing entry for path: %s", self.sub_id, path) if entry_mode == "sample": entry["mode"] = "sample" @@ -202,50 +220,65 @@ class Subscription: "mode": top_mode, "encoding": encoding, } - logger.debug("Subscription %s to be requested: %s", self.sub_id, request) + LOGGER.debug("Subscription %s to be requested: %s", self.sub_id, request) try: - logger.debug("Sub %s attempting path %s", self.sub_id, path) + LOGGER.debug("Sub %s attempting path %s", self.sub_id, path) for stream in self.gnmi_client.subscribe(request): # Check if 
stop was requested if self._stop_event.is_set(): - logger.debug("Sub %s stop requested, breaking stream loop", self.sub_id) + LOGGER.debug("Sub %s stop requested, breaking stream loop", self.sub_id) break + LOGGER.debug("Sub %s received stream message: %s", self.sub_id, stream) + + # DEBUG: Check if update has actual update messages + if stream.HasField("update"): + LOGGER.debug("Sub %s update field present, num updates: %d", + self.sub_id, len(stream.update.update)) + if len(stream.update.update) == 0: + LOGGER.warning("Sub %s received update notification with NO data values - device may have no data for path %s", + self.sub_id, path) + for i, upd in enumerate(stream.update.update): + LOGGER.debug("Sub %s update[%d] has val: %s, path elem count: %d", + self.sub_id, i, upd.HasField("val"), + len(upd.path.elem) if upd.path else 0) + # Parse the protobuf message directly (like pygnmi does) msg_dict = self._parse_subscribe_response(stream) + LOGGER.debug("Sub %s received message: %s", self.sub_id, msg_dict) # Process any update data if msg_dict.get('update'): - logger.debug("Sub %s got update data", self.sub_id) + LOGGER.debug("Sub %s got update data", self.sub_id) if on_update: on_update(msg_dict) else: self._queue.put(msg_dict) # Put a dummy update if syncResponse is received to prevent timeout elif msg_dict.get('sync_response'): - logger.debug("Sub %s received sync response", self.sub_id) + LOGGER.debug("Sub %s received sync response", self.sub_id) # Optional: put a notification about the sync if not on_update: self._queue.put({"type": "sync_response", "value": True}) else: - logger.warning("Sub %s received unknown message: %s", self.sub_id, msg_dict) + LOGGER.warning("Sub %s received unknown message: %s", self.sub_id, msg_dict) except grpc.RpcError as err: # Handle graceful shutdown (channel closed) if err.code() == grpc.StatusCode.CANCELLED: - logger.debug("Sub %s cancelled (channel closed) - graceful shutdown", self.sub_id) + LOGGER.debug("Sub %s cancelled 
(channel closed) - graceful shutdown", self.sub_id) break elif err.code() == grpc.StatusCode.INVALID_ARGUMENT: - logger.warning("Path '%s' rejected (%s) -- trying next", + LOGGER.warning("Path '%s' rejected (%s) -- trying next", path, err.details()) continue else: - logger.exception("Subscription %s hit gRPC error: %s", + LOGGER.exception("Subscription %s hit gRPC error: %s", # Change with TFS Exception self.sub_id, err) break except Exception as exc: # pylint: disable=broad-except - logger.exception("Subscription %s failed: %s", self.sub_id, exc) + LOGGER.exception("Subscription %s failed: %s", self.sub_id, exc) # Change with TFS Exception break - logger.info("Subscription thread %s terminating", self.sub_id) + LOGGER.info("Subscription thread %s terminating", self.sub_id) diff --git a/src/telemetry/backend/tests/gnmi_oc/messages.py b/src/telemetry/backend/tests/gnmi_oc/messages.py index 4722e4bfb..1b7e4f566 100644 --- a/src/telemetry/backend/tests/gnmi_oc/messages.py +++ b/src/telemetry/backend/tests/gnmi_oc/messages.py @@ -56,7 +56,7 @@ devices = { 'username': 'admin', 'password': 'admin', 'insecure': True, - 'kpi' : KPI.TOTAL_POWER, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT, 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model 'endpoint': '1', }, @@ -64,7 +64,7 @@ devices = { def creat_basic_sub_request_parameters() -> dict: - device = devices['device3'] + device = devices['mgon'] if device: kpi = device['kpi'] resource = device['resource'] -- GitLab From ff7c481185082722862bb5a4115b24e687735621 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 9 Feb 2026 01:45:15 +0100 Subject: [PATCH 13/41] first version with preferreb band, alien waves and full spectrum --- manifests/telemetryservice.yaml | 2 +- src/opticalcontroller/OpticalController.py | 47 +++- src/opticalcontroller/RSA.py | 209 +++++++++++++-- src/opticalcontroller/tools.py | 49 +++- src/opticalcontroller/variables.py | 2 +- 
.../service/ServiceServiceServicerImpl.py | 204 ++++++++++---- .../service_handlers/oc/OCServiceHandler.py | 18 +- .../service/service_handlers/oc/OCTools.py | 250 ++++++++++++------ src/service/service/tools/OpticalTools.py | 26 +- src/service/tests/test_recon.py | 12 +- src/service/tests/test_recon2.py | 101 +++++++ 11 files changed, 737 insertions(+), 183 deletions(-) create mode 100644 src/service/tests/test_recon2.py diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml index 935267250..2add96516 100644 --- a/manifests/telemetryservice.yaml +++ b/manifests/telemetryservice.yaml @@ -95,7 +95,7 @@ metadata: app: telemetryservice spec: type: LoadBalancer - loadBalancerIP: _LOAD_BALANCER_IP_ + loadBalancerIP: 192.168.5.250 externalTrafficPolicy: Local selector: app: telemetryservice diff --git a/src/opticalcontroller/OpticalController.py b/src/opticalcontroller/OpticalController.py index cdb999f8e..90c2c246c 100644 --- a/src/opticalcontroller/OpticalController.py +++ b/src/opticalcontroller/OpticalController.py @@ -69,18 +69,18 @@ class AddLightpath(Resource): #@optical.route('/AddFlexLightpath///') -@optical.route('/AddFlexLightpath///', +@optical.route('/AddFlexLightpath////', defaults={"bidir": 1, "band": None, "obx_idx": None}) -@optical.route('/AddFlexLightpath////', +@optical.route('/AddFlexLightpath/////', defaults={"band": None, "obx_idx": None}) -@optical.route('/AddFlexLightpath/////', +@optical.route('/AddFlexLightpath//////', defaults={"obx_idx": None}) -@optical.route('/AddFlexLightpath//////') +@optical.route('/AddFlexLightpath///////') @optical.response(200, 'Success') @optical.response(404, 'Error, not found') class AddFlexLightpath(Resource): @staticmethod - def put(src, dst, bitrate, bidir=1, band=None, obx_idx = None): + def put(src, dst, bitrate, pref, bidir=0, band=None, obx_idx = None ): print("INFO: New MGON request from {} to {} with rate {} and band {}".format(src, dst, bitrate, band)) t0 = time.time()*1000.0 @@ 
-88,7 +88,7 @@ class AddFlexLightpath(Resource): # rsa.g.printGraph() if rsa is not None: - flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band, obx_idx) + flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band, obx_idx, pref) if flow_id is not None: if rsa.db_flows[flow_id]["op-mode"] == 0: return 'No path found', 404 @@ -109,14 +109,39 @@ class AddFlexLightpath(Resource): else: return "Error", 404 +@optical.route('/AddAlienFLexLightpath//////', + defaults={"bidir": 0}) +@optical.route('/AddAlienFLexLightpath///////') +@optical.response(200, 'Success') +@optical.response(404, 'Error, not found') +class AddAlienFLexLightpath(Resource): + @staticmethod + def put(src, s_port, dst, d_port, band, obx_idx, bidir=0): + + print("INFO: New Alien MGON request from {} to {} with band {}".format(src, dst, band)) + t0 = time.time()*1000.0 + #if debug: + # rsa.g.printGraph() + + if rsa is not None: + flow_id = rsa.rsa_fs_alien_computation(src, s_port, dst, d_port, band, bidir, obx_idx) + if flow_id is not None: + if not rsa.db_flows[flow_id]["is_active"]: + return 'No path found', 404 + t1 = time.time() * 1000.0 + elapsed = t1 - t0 + print("INFO: time elapsed = {} ms".format(elapsed)) + return rsa.db_flows[flow_id], 200 + + # @optical.route('/DelFlexLightpath////') -@optical.route('/DelFlexLightpath////') -@optical.route('/DelFlexLightpath/////') +@optical.route('/DelFlexLightpath///') +@optical.route('/DelFlexLightpath////') @optical.response(200, 'Success') @optical.response(404, 'Error, not found') class DelFLightpath(Resource): @staticmethod - def delete( src, dst, bitrate, o_band_id=None, flow_id=None): + def delete( src, dst, flow_id, o_band_id=None): flow = None match1=False ob_id=None @@ -124,13 +149,13 @@ class DelFLightpath(Resource): if flow_id in rsa.db_flows.keys(): flow = rsa.db_flows[flow_id] - match1 = flow["src"] == src and flow["dst"] == dst and flow["bitrate"] == bitrate + match1 = flow["src"] == src and 
flow["dst"] == dst ob_id = flow["parent_opt_band"] if 'parent_opt_band' in flow else None flow['is_active']=False if flow is not None: bidir = flow["bidir"] if bidir: - match2 = flow["src"] == dst and flow["dst"] == src and flow["bitrate"] == bitrate + match2 = flow["src"] == dst and flow["dst"] == src if match1 or match2: ob_id = flow["parent_opt_band"] if 'parent_opt_band' in flow else None rsa.db_flows[flow_id]["is_active"] = False diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index f1d2a1a0b..df4453be7 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -68,18 +68,19 @@ class RSA(): def init_link_slots2(self): if full_links: + print("2026 initialize full spectrum") for l in self.links_dict["optical_links"]: fib = l["optical_details"] #fib = self.links_dict[l]["fibers"][f] if len(fib["c_slots"]) > 0: for c in range(0, Nc): - fib["c_slots"][c] = 1 + fib["c_slots"][str(c)] = 1 if len(fib["l_slots"]) > 0: for c in range(0, Nl): - fib["l_slots"][c] = 1 + fib["l_slots"][str(c)] = 1 if len(fib["s_slots"]) > 0: for c in range(0, Ns): - fib["s_slots"][c] = 1 + fib["s_slots"][str(c)] = 1 if debug: print(fib) for l1 in self.links_dict["optical_links"]: @@ -291,13 +292,14 @@ class RSA(): else: s_sts = [] if old_band_x == "c_slots": - c_sts = [] l_sts = [] + s_sts = [] if old_band_x == "l_slots": c_sts = [] l_sts = [] if old_band_x == "s_slots": - s_sts = [] + c_sts = [] + a_sts = [] return c_sts, l_sts, s_sts @@ -606,10 +608,10 @@ class RSA(): return fiber_list #function invoked for lightpaths and OB - def select_slots_and_ports(self, links, n_slots, c, l, s, bidir): + def select_slots_and_ports(self, links, n_slots, c, l, s, bidir, preferred=None): if debug: print (links, n_slots, c, l, s, bidir, self.c_slot_number, self.l_slot_number, self.s_slot_number) - band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number) + band, slots = slot_selection(c, l, s, n_slots, 
self.c_slot_number, self.l_slot_number, self.s_slot_number, preferred) if debug: print (band, slots) if band is None: @@ -687,6 +689,7 @@ class RSA(): return None, None, None, None, None if debug: print(f"INFO: XXXX {band}, {slots}") + self.get_fibers_forward(links, slots, band) if bidir: self.get_fibers_backward(links, slots, band) @@ -774,13 +777,85 @@ class RSA(): #if debug: # print(self.links_dict) - print("INFO: 4") #if debug: # print(t_flows) print("INFO: Flow matrix computed for Flex Lightpath") return t_flows, band, slots, {}, {} + #function ivoked for fs lightpaths only + def alien_select_slots_and_ports_fs(self, src_port, dst_port, n_slots, c, l, s, bidir, o_band_id): + print("PDP: inside flow creation") + band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number) + if band is None: + print("PDP ERROR: No slots available in the three bands") + return None, None, None, None, None + print(f"PDP: {band}, {slots}") + #if debug: + # print(f"INFO: XXXX {band}, {slots}") + + self.update_optical_band(o_band_id, slots, band) + print("INFO: 1") + + t_flows = {} + + ''' + #flows_add_side + src, dst = add.split("-") + lx = self.get_link_by_name(add)["optical_details"] + #outport = self.links_dict[add]['fibers'][f]["src_port"] + outport = lx["src_port"] + #T1 rules + t_flows[src] = {} + t_flows[src]["f"] = {} + t_flows[src]["b"] = {} + t_flows[src]["f"] = {"in": port_0, "out": outport} + if bidir: + #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"] + r_inport = lx["local_peer_port"] + t_flows[src]["b"] = {"in": r_inport, "out": port_0} + print("INFO: 2") + ''' + src = self.optical_bands[o_band_id]["src"] + dst = self.optical_bands[o_band_id]["dst"] + #R1 rules + t_flows[src] = {} + t_flows[src]["f"] = {} + t_flows[src]["b"] = {} + opt_band_src_port = self.optical_bands[o_band_id]["src_port"] + t_flows[src]["f"] = {"in": src_port, "out": opt_band_src_port} + #to modify to peer ports + ''' + if bidir: + 
#r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"] + r_inport = lx["local_peer_port"] + t_flows[src]["b"] = {"in": r_inport, "out": port_0} + if bidir: + rev_opt_band_dst_port = self.optical_bands[o_band_id]["rev_dst_port"] + #r_outport = self.links_dict[add]['fibers'][f]["remote_peer_port"] + r_outport = lx["remote_peer_port"] + t_flows[dst]["b"] = {"in": rev_opt_band_dst_port, "out": r_outport} + ''' + + #flows_drop_side + # R2 rules + + t_flows[dst] = {} + t_flows[dst]["f"] = {} + t_flows[dst]["b"] = {} + opt_band_dst_port = self.optical_bands[o_band_id]["dst_port"] + t_flows[dst]["f"] = {"in": opt_band_dst_port, "out": dst_port} + ''' + if bidir: + rev_opt_band_src_port = self.optical_bands[o_band_id]["rev_src_port"] + #r_inport = self.links_dict[drop]['fibers'][f]["local_peer_port"] + r_inport = ly["local_peer_port"] + t_flows[src]["b"] = {"in": r_inport, "out": rev_opt_band_src_port} + ''' + print("PDP: Flow matrix computed for Alien Flex Lightpath") + + return t_flows, band, slots, {}, {} + def rsa_computation(self, src, dst, rate, bidir): if self.flow_id == 0: self.flow_id += 1 @@ -869,7 +944,7 @@ class RSA(): #self.db_flows[flow_id]["parent_opt_band"] = 0 #self.db_flows[flow_id]["new_optical_band"] = 0 - def create_optical_band(self, links, path, bidir, num_slots, old_band_x=None): + def create_optical_band(self, links, path, bidir, num_slots, old_band_x=None, preferred=None): print("INFO: Creating optical-band of {} slots".format(num_slots)) if self.opt_band_id == 0: self.opt_band_id += 1 @@ -924,7 +999,7 @@ class RSA(): print(l_slots) print(s_slots) if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0: - flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, bidir) + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, bidir, preferred) if debug: print(flow_list, band_range, slots, fiber_f, fiber_b) 
f0, band = frequency_converter(band_range, slots) @@ -1020,7 +1095,108 @@ class RSA(): result.append(ob_id) return result - def rsa_fs_computation(self, src, dst, rate, bidir, band, bandx_id): + def get_alien_slots(self, optical_band_id, num_slots): + print(f"ALIEN SLOTS: {self.optical_bands[optical_band_id]}") + if "c_slots" in self.optical_bands[optical_band_id].keys(): + if len(self.optical_bands[optical_band_id]["c_slots"]) > 0: + #a_c = c_sts + #MOD + c_sts = consecutives(self.optical_bands[optical_band_id]["c_slots"], num_slots) + #c_sts = common_slots(a_c, b_c) + else: + c_sts = [] + if "l_slots" in self.optical_bands[optical_band_id].keys(): + if len(self.optical_bands[optical_band_id]["l_slots"]) > 0: + #a_l = l_sts + l_sts = consecutives(self.optical_bands[optical_band_id]["l_slots"], num_slots) + #l_sts = common_slots(a_l, b_l) + else: + l_sts = [] + if "s_slots" in self.optical_bands[optical_band_id].keys(): + if len(self.optical_bands[optical_band_id]["s_slots"]) > 0: + #a_s = s_sts + s_sts = consecutives(self.optical_bands[optical_band_id]["s_slots"], num_slots) + #s_sts = common_slots(a_s, b_s) + else: + s_sts = [] + return c_sts, l_sts, s_sts + + + def rsa_fs_alien_computation(self, src, s_port, dst, d_port, band, bidir, obx_idx): + if self.flow_id == 0: + self.flow_id += 1 + else: + if (self.db_flows[self.flow_id]["bidir"] == 1): + self.flow_id += 2 + else: + self.flow_id += 1 + if self.nodes_dict[src]["type"] == "OC-ROADM" and self.nodes_dict[dst]["type"] == "OC-ROADM": + if obx_idx in self.optical_bands.keys(): + #optical_band = self.optical_bands[obx_idx] + num_slots = map_band_to_slot(band) + self.db_flows[self.flow_id] = {} + self.db_flows[self.flow_id]["flow_id"] = self.flow_id + self.db_flows[self.flow_id]["src"] = src + self.db_flows[self.flow_id]["dst"] = dst + self.db_flows[self.flow_id]["bitrate"] = None + self.db_flows[self.flow_id]["bidir"] = bidir + self.db_flows[self.flow_id]["src_port"] = s_port + 
self.db_flows[self.flow_id]["dst_port"] = d_port + + c_slots, l_slots, s_slots = self.get_alien_slots(obx_idx, num_slots) + if debug: + print(f"PDP: {c_slots}") + print(f"PDP: {l_slots}") + print(f"PDP: {s_slots}") + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.alien_select_slots_and_ports_fs(s_port, d_port, num_slots, + c_slots, l_slots, s_slots, bidir, + obx_idx) + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Alien Flex Lightpath with OB already in place") + if flow_list is None: + self.null_values(self.flow_id) + return self.flow_id + slots_i = [] + for i in slots: + slots_i.append(int(i)) + # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band + # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band + self.db_flows[self.flow_id]["flows"] = flow_list + self.db_flows[self.flow_id]["band_type"] = band_range + self.db_flows[self.flow_id]["slots"] = slots_i + self.db_flows[self.flow_id]["fiber_forward"] = fiber_f + self.db_flows[self.flow_id]["fiber_backward"] = fiber_b + self.db_flows[self.flow_id]["op-mode"] = None + self.db_flows[self.flow_id]["n_slots"] = num_slots + self.db_flows[self.flow_id]["links"] = [] + self.db_flows[self.flow_id]["path"] = [] + self.db_flows[self.flow_id]["band"] = band + self.db_flows[self.flow_id]["freq"] = f0 + self.db_flows[self.flow_id]["is_active"] = True + self.db_flows[self.flow_id]["parent_opt_band"] = obx_idx + self.db_flows[self.flow_id]["new_optical_band"] = 0 + self.optical_bands[obx_idx]["served_lightpaths"].append(self.flow_id) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + return self.flow_id + else: + self.null_values(self.flow_id) + return self.flow_id + + + + else: + print("error") + 
self.null_values(self.flow_id) + return self.flow_id + + def rsa_fs_computation(self, src, dst, rate, bidir, band, bandx_id, preferred=None): if band is not None: num_slots_ob = map_band_to_slot(band) print(band, num_slots_ob) @@ -1044,7 +1220,7 @@ class RSA(): if len(path) < 1: self.null_values_ob(self.opt_band_id) return self.opt_band_id, [] - optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob, old_band_x) + optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob, old_band_x, preferred) return None, optical_band_id print("INFO: TP to TP connection") if self.flow_id == 0: @@ -1330,6 +1506,8 @@ class RSA(): print("INFO: Trying to move connection to an existing OB") #first checking in existing OB for ob_id in existing_ob: + if ob_id == ob_idx: + continue if "is_active" in self.optical_bands[ob_id].keys(): is_active = self.optical_bands[ob_id]["is_active"] if not is_active: @@ -1345,13 +1523,14 @@ class RSA(): print(l_slots) print(s_slots) if band_type == "c_slots": - c_slots = [] l_slots =[] + s_slots = [] elif band_type == "l_slots": c_slots = [] - l_slots = [] - elif band_type == "s_slots": s_slots = [] + elif band_type == "s_slots": + c_slots = [] + l_slots =[] if debug: print("OFC26 available slots after reset due to band") print(c_slots) diff --git a/src/opticalcontroller/tools.py b/src/opticalcontroller/tools.py index b9a3e79b6..dfca580f6 100644 --- a/src/opticalcontroller/tools.py +++ b/src/opticalcontroller/tools.py @@ -242,7 +242,7 @@ def get_links_to_node(topology, node): return result -def slot_selection(c, l, s, n_slots, Nc, Nl, Ns): +def slot_selection(c, l, s, n_slots, Nc, Nl, Ns, preferred=None): # First Fit if isinstance(n_slots, int): @@ -253,14 +253,47 @@ def slot_selection(c, l, s, n_slots, Nc, Nl, Ns): slot_c = Nc slot_l = Nl slot_s = Ns - if len(c) >= slot_c: - return "c_slots", c[0: slot_c] - elif len(l) >= slot_l: - return "l_slots", l[0: slot_l] - elif len(s) >= slot_s: - 
return "s_slots", s[0: slot_s] + if preferred == None or preferred == "ANY": + if len(c) >= slot_c: + return "c_slots", c[0: slot_c] + elif len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(s) >= slot_s: + return "s_slots", s[0: slot_s] + else: + return None, None else: - return None, None + if preferred == "C_BAND": + if len(c) >= slot_c: + return "c_slots", c[0: slot_c] + elif len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(s) >= slot_s: + return "s_slots", s[0: slot_s] + else: + return None, None + elif preferred == "L_BAND": + if len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(c) >= slot_c: + return "c_slots", c[0: slot_c] + elif len(s) >= slot_s: + return "s_slots", s[0: slot_s] + else: + return None, None + elif preferred == "S_BAND": + if len(s) >= slot_s: + return "s_slots", s[0: slot_s] + elif len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(c) >= slot_c: + return "c_slots", c[0: slot_c] + else: + return None, None + else: + logging.INFO("PDP: wrong preferred value") + return None, None + def handle_slot (slot_field, slot): for key,value in slot.items() : diff --git a/src/opticalcontroller/variables.py b/src/opticalcontroller/variables.py index 23fbaad24..d271ddeca 100644 --- a/src/opticalcontroller/variables.py +++ b/src/opticalcontroller/variables.py @@ -23,4 +23,4 @@ Nc = 320 #Nc = 10 Ns = 720 -full_links = 0 +full_links = 1 diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index ac5e8afd8..50e8211d4 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -21,7 +21,7 @@ from common.method_wrappers.ServiceExceptions import ( ) from common.proto.context_pb2 import ( Connection, ConstraintActionEnum, Empty, Service, ServiceId, ServiceStatusEnum, - ServiceTypeEnum, TopologyId + ServiceTypeEnum, TopologyId, ContextId ) from common.proto.pathcomp_pb2 import PathCompRequest from 
common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest @@ -45,10 +45,11 @@ from .tools.GeodesicDistance import gps_distance from .tools.OpticalTools import ( add_flex_lightpath, add_lightpath, delete_lightpath, adapt_reply, get_device_name_from_uuid, get_optical_band, refresh_opticalcontroller, DelFlexLightpath , extend_optical_band, - reconfig_flex_lightpath, adapt_reply_ob + reconfig_flex_lightpath, adapt_reply_ob, add_alien_flex_lightpath ) + LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('Service', 'RPC') @@ -291,32 +292,55 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): oc_type = 1 bitrate = 100 dj_optical_band_id = None + preferred = "ANY" + alien = 0 + alien_band = 0 + alien_optical_band_id = 0 for constraint in service.service_constraints: - if "bandwidth" in constraint.custom.constraint_type: - bitrate = int(float(constraint.custom.constraint_value)) - elif "bidirectionality" in constraint.custom.constraint_type: - bidir = int(constraint.custom.constraint_value) - elif "optical-band-width" in constraint.custom.constraint_type: - ob_band = int(constraint.custom.constraint_value) - elif "type" in constraint.custom.constraint_type: - logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") - oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) - logging.info(f"{oc_type}") - elif "disjoint_optical_band_id" in constraint.custom.constraint_type: - logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") - dj_optical_band_id = int(constraint.custom.constraint_value) - logging.info(f"{dj_optical_band_id}") + if "alien" in constraint.custom.constraint_type: + alien = 1 + break + for constraint in service.service_constraints: + if alien == 1: + if "alien_spectrum" in constraint.custom.constraint_type: + alien_band = int(constraint.custom.constraint_value) + elif "optical_band_id" in constraint.custom.constraint_type: + alien_optical_band_id = 
int(constraint.custom.constraint_value) + elif "bidirectionality" in constraint.custom.constraint_type: + bidir = int(constraint.custom.constraint_value) + else: + if "bandwidth" in constraint.custom.constraint_type: + bitrate = int(float(constraint.custom.constraint_value)) + elif "bidirectionality" in constraint.custom.constraint_type: + bidir = int(constraint.custom.constraint_value) + elif "optical-band-width" in constraint.custom.constraint_type: + ob_band = int(constraint.custom.constraint_value) + elif "type" in constraint.custom.constraint_type: + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) + logging.info(f"{oc_type}") + elif "disjoint_optical_band_id" in constraint.custom.constraint_type: + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + dj_optical_band_id = int(constraint.custom.constraint_value) + logging.info(f"{dj_optical_band_id}") + elif "preferred_band" in constraint.custom.constraint_type: + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + preferred = str(constraint.custom.constraint_value) + logging.info(f"{preferred}") reply_txt = "" # to get the reply form the optical module #multi-granular - if oc_type == 1: - reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) - elif oc_type == 2: - reply_txt = add_lightpath(src, dst, bitrate, bidir) + if alien != 0: + reply_txt = add_alien_flex_lightpath(src, ports[0], dst, ports[1], alien_band, alien_optical_band_id, bidir) else: - reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) - logging.info(f"TEEEEEEEEEEEEEEST {oc_type}") - logging.info(f"POLIMI {reply_txt}") + if oc_type == 1: + reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, preferred, ob_band, dj_optical_band_id) + elif oc_type == 2: + reply_txt = add_lightpath(src, dst, 
bitrate, bidir) + else: + reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, preferred, ob_band, dj_optical_band_id) + #logging.info(f"TEEEEEEEEEEEEEEST {oc_type}") + #logging.info(f"POLIMI {reply_txt}") if reply_txt == None: return service_with_uuids.service_id reply_json = json.loads(reply_txt) @@ -541,7 +565,8 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): # Set service status to "SERVICESTATUS_UPDATING" to ensure rest of components are aware the service is # being modified. # pylint: disable=no-member - updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING + if updated_service.service_type != ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING # Update endpoints # pylint: disable=no-member @@ -614,6 +639,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): #if "ob_id" in c_rules_dict: # ob_id = c_rules_dict["ob_id"] if ("flow_id" in c_rules_dict): + updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING flow_id = c_rules_dict["flow_id"] reply_txt = "" # to get the reply form the optical module @@ -644,33 +670,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): new_connection = tmp_connection service_new = optical_reply.services[0] LOGGER.info('QQQQ:{}'.format(service_new)) - - ''' - if len(service.service_config.config_rules) > 0: - c_rules_dict = json.loads( - service.service_config.config_rules[0].custom.resource_value) - ob_id=None - flow_id=None - if "ob_id" in c_rules_dict: - ob_id = c_rules_dict["ob_id"] - if ("flow_id" in c_rules_dict): - flow_id = c_rules_dict["flow_id"] - #if ("ob_id" in c_rules_dict): - # ob_id = c_rules_dict["ob_id"] - params['bitrate']=bitrate - params['dst']=dst - params['src']=src - params['ob_id']=ob_id - params['flow_id']=flow_id - params['bidir'] = bidir - - - tasks_scheduler = TasksScheduler(self.service_handler_factory) - 
tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) - tasks_scheduler.execute_all() - - ''' - # Feed TaskScheduler with the service to update, the old connection to # deconfigure and the new connection to configure. It will produce a # schedule of tasks (an ordered list of tasks to be executed) to @@ -681,6 +680,109 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): tasks_scheduler.compose_optical_service_update( service_new, old_connection, new_connection) tasks_scheduler.execute_all() + else: + if ("ob_id" in c_rules_dict) and ("low-freq" in c_rules_dict): + LOGGER.info('PDP: it is an optical band') + ob_id = c_rules_dict["ob_id"] + band_txt = get_optical_band(ob_id) + optical_band = json.loads(band_txt) + ''' + optical_band = None + obs = context_client.GetOpticalBand() + for obz in obs: + LOGGER.info(f"PDP: {obz.opticalband_id}") + if obz.opticalband_id == ob_id: + optical_band = obz + if optical_band is not None: + ''' + #optical_band = context_client.SelectOpticalBand(ob_id) + served_flows = optical_band.get('served_lightpaths') + LOGGER.info(f'PDP: served flows {served_flows}') + #context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) + #response = context_client.ListServices(context_id_x) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + #assert len(response.services) == 1 + service_ids = [] + for service in response.services: + ########## + #service = response.services[0] + if len(service.service_config.config_rules) > 0: + c_rules_dict = json.loads( + service.service_config.config_rules[0].custom.resource_value + ) + if ("flow_id" in c_rules_dict): + flow_id = c_rules_dict["flow_id"] + LOGGER.info(f"PDP checking {flow_id} and {served_flows}") + if flow_id in served_flows: + ########## + updated_service : Optional[Service] = get_service_by_id( + context_client, 
service.service_id, rw_copy=True, + include_config_rules=False, include_constraints=False, include_endpoint_ids=False) + updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING + updated_service_id_with_uuids = context_client.SetService(updated_service) + + # PathComp requires endpoints, constraints and config rules + updated_service_with_uuids = get_service_by_id( + context_client, updated_service_id_with_uuids, rw_copy=True, + include_config_rules=True, include_constraints=True, include_endpoint_ids=True) + LOGGER.info('WYY:{}'.format(updated_service_with_uuids)) + + # Get active connection + connections = context_client.ListConnections(updated_service_id_with_uuids) + LOGGER.info('WWWW:{}'.format(connections)) + old_connection = connections.connections[0] + + ''' + for service_idc in service_ids: + service_d = context_client.GetService(service_idc) + c_rules_dict = json.loads( + service_d.service_config.config_rules[0].custom.resource_value) + ''' + LOGGER.info('PDP:{}'.format(c_rules_dict)) + flow_id = c_rules_dict["flow_id"] + reply_txt = "" + # to get the reply form the optical module + #multi-granular + reply_txt = reconfig_flex_lightpath(flow_id) + reply_json = json.loads(reply_txt) + LOGGER.info('[PDP] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) + devices = topology_details.devices + context_uuid_x = topology_details.topology_id.context_id.context_uuid.uuid + topology_uuid_x = topology_details.topology_id.topology_uuid.uuid + + device_names : Dict[str, str] = dict() + for device in devices: + device_uuid = device.device_id.device_uuid.uuid + device_names[device_uuid] = device.name + + if reply_txt is not "": + optical_reply = adapt_reply(devices, updated_service, reply_json, context_uuid_x, topology_uuid_x, "") + new_connection = optical_reply.connections[0] + #for candidate_new_connection in pathcomp_reply.connections: + str_candidate_new_connection = connection_to_string(new_connection) + 
LOGGER.info('QQQQ_old:{}'.format(str_old_connection)) + LOGGER.info('QQQQ_new:{}'.format(str_candidate_new_connection)) + # Change UUID of new connection to prevent collisions + tmp_connection = Connection() + tmp_connection.CopyFrom(new_connection) + tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) + new_connection = tmp_connection + service_new = optical_reply.services[0] + LOGGER.info('QQQQ:{}'.format(service_new)) + + + # Feed TaskScheduler with the service to update, the old connection to + # deconfigure and the new connection to configure. It will produce a + # schedule of tasks (an ordered list of tasks to be executed) to + # implement the requested changes. + tasks_scheduler = TasksScheduler(self.service_handler_factory) + #tasks_scheduler.compose_optical_service_update( + # updated_service, old_connection, service_new, new_connection) + tasks_scheduler.compose_optical_service_update( + service_new, old_connection, new_connection) + tasks_scheduler.execute_all() + else: # Find alternative connections diff --git a/src/service/service/service_handlers/oc/OCServiceHandler.py b/src/service/service/service_handlers/oc/OCServiceHandler.py index 27fc89563..7997aba04 100644 --- a/src/service/service/service_handlers/oc/OCServiceHandler.py +++ b/src/service/service/service_handlers/oc/OCServiceHandler.py @@ -93,15 +93,16 @@ class OCServiceHandler(_ServiceHandler): settings = self.__settings_handler.get('/settings') bidir = settings.value.get("bidir") + op_mode = settings.value.get("operational-mode") ob_expansion =settings.value.get('ob-expanded',None) if ob_expansion : if not is_opticalband: LOGGER.info(f"ob-expanded bvalue is: {ob_expansion} and is_opticalband {is_opticalband}") return results - LOGGER.info(f"is_opticalband {is_opticalband}") - LOGGER.info(f"set_Opticalconfig_endpoints is:{endpoints}") - flows = endpoints_to_flows(endpoints, bidir, is_opticalband) - + LOGGER.info(f"PDPis_opticalband {is_opticalband}") + 
LOGGER.info(f"PDPset_Opticalconfig_endpoints is:{endpoints}") + flows = endpoints_to_flows(endpoints, bidir, is_opticalband, op_mode) + LOGGER.info(f"PDPflows: {flows}") #new cycle for setting optical devices for device_uuid, dev_flows in flows.items(): try: @@ -126,14 +127,17 @@ class OCServiceHandler(_ServiceHandler): service_uuid = self.__service.service_id.service_uuid.uuid chk_type('endpoints', endpoints, list) if len(endpoints) == 0: return [] - + op_mode = None if self.__settings_handler.get('/settings-ob_{}'.format(connection_uuid)): is_opticalband =True settings = self.__settings_handler.get('/settings-ob_{}'.format(connection_uuid)) else: - settings = self.__settings_handler.get('/settings') + settings = self.__settings_handler.get('/settings') + op_mode = settings.value.get("operational-mode") + bidir = settings.value.get("bidir",None) + results = [] for endpoint in endpoints: @@ -169,7 +173,7 @@ class OCServiceHandler(_ServiceHandler): else: LOGGER.info(f"is_opticalband {is_opticalband}") LOGGER.info(f'RERF endpoints :{endpoints}') - flows = endpoints_to_flows(endpoints, bidir, is_opticalband) + flows = endpoints_to_flows(endpoints, bidir, is_opticalband, op_mode) LOGGER.info(f'RERF:{flows}') for device_uuid, dev_flows in flows.items(): diff --git a/src/service/service/service_handlers/oc/OCTools.py b/src/service/service/service_handlers/oc/OCTools.py index e3fcac3c4..746c29fdb 100644 --- a/src/service/service/service_handlers/oc/OCTools.py +++ b/src/service/service/service_handlers/oc/OCTools.py @@ -286,95 +286,189 @@ def ob_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): return entries -def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): - entries = {} - end = len(endpoints) - i = 0 - #tx tp - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] - - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - entry_tuple = "0", endpoint_uuid - 
entries[device_uuid].append(entry_tuple) - i = i + 1 - #if bidir reading 4 endpoints per node - if bidir: - log.info(f"i starts with {i} ") - device0 , endpoint0=endpoints[0][0:2] - device1 , endpoint1=endpoints[1][0:2] - finalend=end-2 - if device0 ==device1: - i = i + 1 - - else : - finalend=end-1 - while(i < finalend): - #i - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] +def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int, op_mode: int): + if op_mode is not None: + entries = {} + end = len(endpoints) + i = 0 + #tx tp + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - #i+1 - next_endpoint = endpoints[i+1] - next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] - if next_device_uuid == device_uuid: - entry_tuple = endpoint_uuid, next_endpoint_uuid - entries[device_uuid].append(entry_tuple) - else: - log.info(f"error : next_dev {next_device_uuid} dev {device_uuid} for i {i} ") - return {} - #i+2 - - next_2_endpoint = endpoints[i+2] - next_2_device_uuid, next_2_endpoint_uuid = next_2_endpoint[0:2] - #i+3 - next_3_endpoint = endpoints[i+3] - next_3_device_uuid, next_3_endpoint_uuid = next_3_endpoint[0:2] - log.info(f"dev {device_uuid} ") - log.info(f"dev2 {next_2_device_uuid} dev3 {next_3_device_uuid} ") - if next_2_device_uuid == next_3_device_uuid and next_3_device_uuid == device_uuid: - entry_tuple = next_2_endpoint_uuid, next_3_endpoint_uuid - entries[device_uuid].append(entry_tuple) - i = i + 4 - else: - log.info(f"error : next_2_dev {next_2_device_uuid} next_3_device{next_3_device_uuid} dev {device_uuid} for i {i} ") - return {} + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + entry_tuple = "0", endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 1 + #if bidir reading 4 endpoints per node + if bidir: + log.info(f"i starts with {i} ") + device0 , endpoint0=endpoints[0][0:2] + device1 , 
endpoint1=endpoints[1][0:2] + finalend=end-2 + if device0 ==device1: + i = i + 1 + else : + finalend=end-1 + while(i < finalend): + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + else: + log.info(f"error : next_dev {next_device_uuid} dev {device_uuid} for i {i} ") + return {} + #i+2 + + next_2_endpoint = endpoints[i+2] + next_2_device_uuid, next_2_endpoint_uuid = next_2_endpoint[0:2] + #i+3 + next_3_endpoint = endpoints[i+3] + next_3_device_uuid, next_3_endpoint_uuid = next_3_endpoint[0:2] + log.info(f"dev {device_uuid} ") + log.info(f"dev2 {next_2_device_uuid} dev3 {next_3_device_uuid} ") + if next_2_device_uuid == next_3_device_uuid and next_3_device_uuid == device_uuid: + entry_tuple = next_2_endpoint_uuid, next_3_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 4 + else: + log.info(f"error : next_2_dev {next_2_device_uuid} next_3_device{next_3_device_uuid} dev {device_uuid} for i {i} ") + return {} + else: + while(i < end-1): + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 2 + else: + return {} + #rx tp + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + entry_tuple = endpoint_uuid, "0", + entries[device_uuid].append(entry_tuple) else: + entries = {} + if len(endpoints) != 4: + log.info(f"PDP : 
expected alien configuration with 4 endpoints ") + return {} + i = 0 + device0 , endpoint0 = endpoints[0][0:2] + device1 , endpoint1 = endpoints[1][0:2] + device2 , endpoint2 = endpoints[2][0:2] + device3 , endpoint3 = endpoints[3][0:2] + + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + + if device0 == device1: + if device0 not in entries.keys(): + entries[device0] = [] + entry_tuple = endpoint0, endpoint1 + entries[device0].append(entry_tuple) + + if device2 == device3: + if device2 not in entries.keys(): + entries[device2] = [] + entry_tuple = endpoint2, endpoint3 + entries[device2].append(entry_tuple) + + #if bidir reading 4 endpoints per node + ''' + if bidir: + log.info(f"i starts with {i} ") + device0 , endpoint0=endpoints[0][0:2] + device1 , endpoint1=endpoints[1][0:2] + finalend=end-2 + if device0 ==device1: + i = i + 1 + else : + finalend=end-1 + while(i < finalend): + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + else: + log.info(f"error : next_dev {next_device_uuid} dev {device_uuid} for i {i} ") + return {} + #i+2 + + next_2_endpoint = endpoints[i+2] + next_2_device_uuid, next_2_endpoint_uuid = next_2_endpoint[0:2] + #i+3 + next_3_endpoint = endpoints[i+3] + next_3_device_uuid, next_3_endpoint_uuid = next_3_endpoint[0:2] + log.info(f"dev {device_uuid} ") + log.info(f"dev2 {next_2_device_uuid} dev3 {next_3_device_uuid} ") + if next_2_device_uuid == next_3_device_uuid and next_3_device_uuid == device_uuid: + entry_tuple = next_2_endpoint_uuid, next_3_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 4 + else: + log.info(f"error : next_2_dev {next_2_device_uuid} 
next_3_device{next_3_device_uuid} dev {device_uuid} for i {i} ") + return {} + else: while(i < end-1): - #i - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - #i+1 - next_endpoint = endpoints[i+1] - next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] - if next_device_uuid == device_uuid: - entry_tuple = endpoint_uuid, next_endpoint_uuid - entries[device_uuid].append(entry_tuple) - i = i + 2 - else: - return {} - #rx tp - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - entry_tuple = endpoint_uuid, "0", - entries[device_uuid].append(entry_tuple) + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 2 + else: + return {} + #rx tp + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + entry_tuple = endpoint_uuid, "0", + entries[device_uuid].append(entry_tuple) + ''' return entries -def endpoints_to_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int, is_ob: bool)->Dict: +def endpoints_to_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int, is_ob: bool, op_mode: int)->Dict: if is_ob: entries = ob_flows(endpoints, bidir) else: - entries = conn_flows(endpoints, bidir) + entries = conn_flows(endpoints, bidir, op_mode) return entries diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 4fb09718e..60967d7b6 100644 --- a/src/service/service/tools/OpticalTools.py +++ 
b/src/service/service/tools/OpticalTools.py @@ -137,23 +137,26 @@ def reconfig_flex_lightpath(flow_id) -> str: return reply_bid_txt -def add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) -> str: +def add_flex_lightpath(src, dst, bitrate, bidir, pref, ob_band, dj_optical_band_id) -> str: if not TESTING: urlx = "" headers = {"Content-Type": "application/json"} base_url = get_optical_controller_base_url() + prefs = "ANY" + if pref != None: + prefs = pref if ob_band is None: if bidir is None: bidir = 1 - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), prefs, str(bidir)) else: if bidir is None: bidir = 1 if dj_optical_band_id is None: - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir), str(ob_band)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), prefs, str(bidir), str(ob_band)) else: - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir), str(ob_band), str(dj_optical_band_id)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), prefs, str(bidir), str(ob_band), str(dj_optical_band_id)) r = requests.put(urlx, headers=headers) print(f"addpathlight {r}") reply = r.text @@ -164,6 +167,19 @@ def add_flex_lightpath(src, dst, bitrate, bidir, ob_band, dj_optical_band_id) -> return reply_uni_txt return reply_bid_txt +def add_alien_flex_lightpath(src, s_port, dst, d_port, band, ob_id, bidir=None) -> str: + urlx = "" + headers = {"Content-Type": "application/json"} + base_url = get_optical_controller_base_url() + #/AddAlienFLexLightpath////// + if bidir is None: + urlx = "{:s}/AddAlienFLexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, s_port, dst, d_port, 
str(band), str(ob_id)) + else: + urlx = "{:s}/AddAlienFLexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, s_port, dst, d_port, str(band), str(ob_id), str(bidir)) + r = requests.put(urlx, headers=headers) + reply = r.text + return reply + def add_lightpath(src, dst, bitrate, bidir) -> str: if not TESTING: urlx = "" @@ -205,7 +221,7 @@ def DelFlexLightpath( src, dst, bitrate, ob_id, flow_id=None) -> str: if not TESTING: if flow_id is not None: if ob_id is not None : - urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}/{}".format(base_url, src, dst, bitrate, flow_id, ob_id) + urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}".format(base_url, src, dst, flow_id, ob_id) else : #urlx = "http://{}:{}/OpticalTFS/DelOpticalBand/{}/{}/{}".format(OPTICAL_IP, OPTICAL_PORT, src, dst, ob_id) diff --git a/src/service/tests/test_recon.py b/src/service/tests/test_recon.py index 97acdf7e2..4b1a5e7d2 100644 --- a/src/service/tests/test_recon.py +++ b/src/service/tests/test_recon.py @@ -75,13 +75,13 @@ def test_service_recompute_connection( name = service.name print(name) - if name == "optical-connection1": + if name == "optical-band1": response = context_client.ListConnections(service_id) print("AAAAAAAAA") print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) - assert len(response.connections) == 1 # 1 connection per service - str_old_connections = grpc_message_to_json_string(response) + #assert len(response.connections) == 1 # 1 connection per service + #str_old_connections = grpc_message_to_json_string(response) # Change path first time request = Service() @@ -94,8 +94,8 @@ def test_service_recompute_connection( response = context_client.ListConnections(service_id) print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) - assert len(response.connections) 
== 1 # 1 connection per service - str_new_connections = grpc_message_to_json_string(response) - print(' new connection => {:s}'.format(str_new_connections)) + #assert len(response.connections) == 1 # 1 connection per service + #str_new_connections = grpc_message_to_json_string(response) + #print(' new connection => {:s}'.format(str_new_connections)) diff --git a/src/service/tests/test_recon2.py b/src/service/tests/test_recon2.py new file mode 100644 index 000000000..59637a216 --- /dev/null +++ b/src/service/tests/test_recon2.py @@ -0,0 +1,101 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, pytest +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, Service +#from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = 'src/service/tests/descriptors_recompute_conns.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + _client.close() + + +def test_service_recompute_connection( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +) -> None: + + # ===== Setup scenario ============================================================================================= + #validate_empty_scenario(context_client) + + # Load descriptors and validate the base scenario + #descriptor_loader = DescriptorLoader( + # descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client, + # service_client=service_client) + #results = descriptor_loader.process() + #check_descriptor_load_results(results, descriptor_loader) + #descriptor_loader.validate() + + + # ===== Recompute Connection 
======================================================================================= + response = context_client.ListServices(ADMIN_CONTEXT_ID) + print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + #assert len(response.services) == 1 + for service in response.services: + #service = response.services[0] + service_id = service.service_id + name = service.name + print(name) + + if name == "optical-band2": + response = context_client.ListConnections(service_id) + print("AAAAAAAAA") + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + #assert len(response.connections) == 1 # 1 connection per service + #str_old_connections = grpc_message_to_json_string(response) + + # Change path first time + request = Service() + request.CopyFrom(service) + del request.service_endpoint_ids[:] # pylint: disable=no-member + del request.service_constraints[:] # pylint: disable=no-member + del request.service_config.config_rules[:] # pylint: disable=no-member + service_client.RecomputeConnections(request) + + response = context_client.ListConnections(service_id) + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + #assert len(response.connections) == 1 # 1 connection per service + #str_new_connections = grpc_message_to_json_string(response) + #print(' new connection => {:s}'.format(str_new_connections)) + + -- GitLab From ea4be0045a87841c82c68d91700a605c168a7bc4 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Tue, 10 Feb 2026 12:15:40 +0100 Subject: [PATCH 14/41] working with all new features --- src/opticalcontroller/RSA.py | 116 +++++++++++++++++- .../service/ServiceServiceServicerImpl.py | 6 +- src/service/service/tools/OpticalTools.py | 6 +- src/service/tests/test_recon.py | 2 +- 
src/service/tests/test_recon2.py | 2 +- 5 files changed, 124 insertions(+), 8 deletions(-) diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index df4453be7..ef76033d3 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -1260,9 +1260,121 @@ class RSA(): existing_ob = self.get_optical_bands(roadm_src, roadm_dst) if len(existing_ob) > 0: + #first checking if provided band id is passed + if preferred is not None: + ob_id = int(preferred) + if "is_active" in self.optical_bands[ob_id].keys(): + is_active = self.optical_bands[ob_id]["is_active"] + if is_active: + op, num_slots = map_rate_to_slot(rate) + if debug: + print(temp_links2) + c_slots, l_slots, s_slots = self.get_slots(temp_links2, num_slots, ob_id) + if debug: + print(c_slots) + print(l_slots) + print(s_slots) + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs(temp_links2, num_slots, + c_slots, + l_slots, s_slots, bidir, + ob_id) + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Flex Lightpath with OB already in place") + if flow_list is not None: + slots_i = [] + for i in slots: + slots_i.append(int(i)) + # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band + # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band + self.db_flows[self.flow_id]["flows"] = flow_list + self.db_flows[self.flow_id]["band_type"] = band_range + self.db_flows[self.flow_id]["slots"] = slots_i + self.db_flows[self.flow_id]["fiber_forward"] = fiber_f + self.db_flows[self.flow_id]["fiber_backward"] = fiber_b + self.db_flows[self.flow_id]["op-mode"] = op + self.db_flows[self.flow_id]["n_slots"] = num_slots + self.db_flows[self.flow_id]["links"] = temp_links2 + self.db_flows[self.flow_id]["path"] = temp_path + self.db_flows[self.flow_id]["band"] = 
band + self.db_flows[self.flow_id]["freq"] = f0 + self.db_flows[self.flow_id]["is_active"] = True + self.db_flows[self.flow_id]["parent_opt_band"] = ob_id + self.db_flows[self.flow_id]["new_optical_band"] = 0 + self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + return self.flow_id, ob_id + else: + print("not enough slots") + print("trying to extend OB {}".format(ob_id)) + new_slots = self.extend_optical_band(ob_id, band=None) + if len(new_slots) > 0: + band_type = self.optical_bands[ob_id]["band_type"] + c_slots = [] + l_slots = [] + s_slots = [] + if band_type == "c_slots": + c_slots = new_slots + elif band_type == "l_slots": + l_slots = new_slots + else: + s_slots = new_slots + op, num_slots = map_rate_to_slot(rate) + if debug: + print(temp_links2) + c_slots, l_slots, s_slots = self.get_slots(temp_links2, num_slots, ob_id) + if debug: + print(c_slots) + print(l_slots) + print(s_slots) + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs( + temp_links2, num_slots, + c_slots, + l_slots, s_slots, bidir, + ob_id) + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Flex Lightpath with OB already in place") + if flow_list is not None: + slots_i = [] + for i in slots: + slots_i.append(int(i)) + # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band + # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band + self.db_flows[self.flow_id]["flows"] = flow_list + self.db_flows[self.flow_id]["band_type"] = band_range + self.db_flows[self.flow_id]["slots"] = slots_i + self.db_flows[self.flow_id]["fiber_forward"] = fiber_f + self.db_flows[self.flow_id]["fiber_backward"] 
= fiber_b + self.db_flows[self.flow_id]["op-mode"] = op + self.db_flows[self.flow_id]["n_slots"] = num_slots + self.db_flows[self.flow_id]["links"] = temp_links2 + self.db_flows[self.flow_id]["path"] = temp_path + self.db_flows[self.flow_id]["band"] = band + self.db_flows[self.flow_id]["freq"] = f0 + self.db_flows[self.flow_id]["is_active"] = True + self.db_flows[self.flow_id]["parent_opt_band"] = ob_id + #self.db_flows[flow_id]["new_optical_band"] = 1 + self.db_flows[self.flow_id]["new_optical_band"] = 2 + self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + return self.flow_id, ob_id + else: + print("it is not possible to allocate connection in extended OB {}".format(ob_id)) + #checking other existing OB print("INFO: Evaluating existing OB {}".format(existing_ob)) - #first checking in existing OB - ob_found = 0 for ob_id in existing_ob: if "is_active" in self.optical_bands[ob_id].keys(): is_active = self.optical_bands[ob_id]["is_active"] diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 50e8211d4..98b0a2670 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -322,11 +322,15 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): elif "disjoint_optical_band_id" in constraint.custom.constraint_type: logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") dj_optical_band_id = int(constraint.custom.constraint_value) - logging.info(f"{dj_optical_band_id}") + logging.info(f"{dj_optical_band_id}") elif "preferred_band" in constraint.custom.constraint_type: + #used only for optical band specification logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") preferred = 
str(constraint.custom.constraint_value) logging.info(f"{preferred}") + elif "optical_band_id" in constraint.custom.constraint_type: + #re-use the same tag for the preferred optical band with media-channel + preferred = int(constraint.custom.constraint_value) reply_txt = "" # to get the reply form the optical module #multi-granular diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 60967d7b6..99261647e 100644 --- a/src/service/service/tools/OpticalTools.py +++ b/src/service/service/tools/OpticalTools.py @@ -149,14 +149,14 @@ def add_flex_lightpath(src, dst, bitrate, bidir, pref, ob_band, dj_optical_band_ if ob_band is None: if bidir is None: bidir = 1 - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), prefs, str(bidir)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir)) else: if bidir is None: bidir = 1 if dj_optical_band_id is None: - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), prefs, str(bidir), str(ob_band)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir), str(ob_band)) else: - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), prefs, str(bidir), str(ob_band), str(dj_optical_band_id)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir), str(ob_band), str(dj_optical_band_id)) r = requests.put(urlx, headers=headers) print(f"addpathlight {r}") reply = r.text diff --git a/src/service/tests/test_recon.py b/src/service/tests/test_recon.py index 4b1a5e7d2..b049fc93b 100644 --- a/src/service/tests/test_recon.py +++ b/src/service/tests/test_recon.py @@ -75,7 +75,7 @@ def test_service_recompute_connection( name = service.name 
print(name) - if name == "optical-band1": + if name == "optical-band-C1": response = context_client.ListConnections(service_id) print("AAAAAAAAA") print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( diff --git a/src/service/tests/test_recon2.py b/src/service/tests/test_recon2.py index 59637a216..2c9fbed7d 100644 --- a/src/service/tests/test_recon2.py +++ b/src/service/tests/test_recon2.py @@ -75,7 +75,7 @@ def test_service_recompute_connection( name = service.name print(name) - if name == "optical-band2": + if name == "optical-band-C2": response = context_client.ListConnections(service_id) print("AAAAAAAAA") print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( -- GitLab From 4f78cbf10ff3741caa1d8c25cb7bc178fa227664 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Tue, 10 Feb 2026 16:37:10 +0100 Subject: [PATCH 15/41] bugfix on the alien media channels --- src/opticalcontroller/RSA.py | 108 +++++++++++++++++++++++++++++++++-- 1 file changed, 104 insertions(+), 4 deletions(-) diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index ef76033d3..182ed1ad1 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -1586,9 +1586,11 @@ class RSA(): print(f"INFO: Reconfiguring connection {flow_idx}") if flow_idx not in self.db_flows.keys(): print(f"ERROR: key not present {flow_idx}") - else: - print(self.db_flows[flow_idx]) + return None, 0 #self.db_flows[flow_idx] = {} + op = self.db_flows[flow_idx]["op-mode"] + if op is None: + return self.alien_reconfig(flow_idx) src = self.db_flows[flow_idx]["src"] dst = self.db_flows[flow_idx]["dst"] rate = self.db_flows[flow_idx]["bitrate"] @@ -1598,14 +1600,12 @@ class RSA(): slots_init = self.db_flows[flow_idx]["slots"] fiber_f = self.db_flows[flow_idx]["fiber_forward"] fiber_b = self.db_flows[flow_idx]["fiber_backward"] - op = self.db_flows[flow_idx]["op-mode"] num_slots = self.db_flows[flow_idx]["n_slots"] links = self.db_flows[flow_idx]["links"] path = 
self.db_flows[flow_idx]["path"] band = self.db_flows[flow_idx]["band"] f0 = self.db_flows[flow_idx]["freq"] ob_idx = self.db_flows[flow_idx]["parent_opt_band"] - r1 = "" r2 = "" if len(links) == 2: @@ -1689,6 +1689,106 @@ class RSA(): continue print("not enough slots") return None, 0 + + + def alien_reconfig(self, flow_idx): + print(f"INFO: Reconfiguring alien connection {flow_idx}") + if flow_idx not in self.db_flows.keys(): + print(f"ERROR: key not present {flow_idx}") + return None, 0 + op = self.db_flows[flow_idx]["op-mode"] + src = self.db_flows[flow_idx]["src"] + dst = self.db_flows[flow_idx]["dst"] + rate = self.db_flows[flow_idx]["bitrate"] + bidir = self.db_flows[flow_idx]["bidir"] + flow_list = self.db_flows[flow_idx]["flows"] + band_type = self.db_flows[flow_idx]["band_type"] + slots_init = self.db_flows[flow_idx]["slots"] + fiber_f = self.db_flows[flow_idx]["fiber_forward"] + fiber_b = self.db_flows[flow_idx]["fiber_backward"] + num_slots = self.db_flows[flow_idx]["n_slots"] + links = self.db_flows[flow_idx]["links"] + path = self.db_flows[flow_idx]["path"] + band = self.db_flows[flow_idx]["band"] + f0 = self.db_flows[flow_idx]["freq"] + ob_idx = self.db_flows[flow_idx]["parent_opt_band"] + existing_ob = self.get_optical_bands(src, dst) + if len(existing_ob) > 0: + print("INFO: Trying to move connection to an existing OB") + #first checking in existing OB + for ob_id in existing_ob: + if ob_id == ob_idx: + continue + if not band_type in self.optical_bands[ob_id].keys(): + continue + if "is_active" in self.optical_bands[ob_id].keys(): + is_active = self.optical_bands[ob_id]["is_active"] + if not is_active: + continue + c_slots, l_slots, s_slots = self.get_alien_slots(ob_id, num_slots) + if debug: + print("OFC26 available slots pre") + print(c_slots) + print(l_slots) + print(s_slots) + if band_type == "c_slots": + l_slots =[] + s_slots = [] + elif band_type == "l_slots": + c_slots = [] + s_slots = [] + elif band_type == "s_slots": + c_slots = [] + l_slots 
=[] + if debug: + print("OFC26 available slots after reset due to band") + print(c_slots) + print(l_slots) + print(s_slots) + s_port = flow_list[src]["f"]["in"] + d_port = flow_list[dst]["f"]["out"] + #{'MGON1': {'f': {'in': 'port-25-in', 'out': 'port-9-out'}, 'b': {}}, 'MGON3': {'f': {'in': 'port-1-in', 'out': 'port-25-out'}, 'b': {}}} + + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.alien_select_slots_and_ports_fs(s_port, d_port, num_slots, + c_slots, l_slots, s_slots, bidir, + ob_id) + f0, band = frequency_converter(band_range, slots) + + if debug: + print(f0, band) + print("INFO: RSA completed for alien Flex Lightpath with OB already in place") + if flow_list is None: + continue + slots_i = [] + for i in slots: + slots_i.append(int(i)) + self.db_flows[flow_idx]["flows"] = flow_list + self.db_flows[flow_idx]["band_type"] = band_range + self.db_flows[flow_idx]["slots"] = slots_i + self.db_flows[flow_idx]["fiber_forward"] = fiber_f + self.db_flows[flow_idx]["fiber_backward"] = fiber_b + #self.db_flows[flow_idx]["op-mode"] = op + self.db_flows[flow_idx]["n_slots"] = num_slots + #self.db_flows[flow_idx]["links"] = temp_links2 + #self.db_flows[flow_idx]["path"] = temp_path + self.db_flows[flow_idx]["band"] = band + self.db_flows[flow_idx]["freq"] = f0 + self.db_flows[flow_idx]["is_active"] = True + self.db_flows[flow_idx]["parent_opt_band"] = ob_id + self.db_flows[flow_idx]["new_optical_band"] = 0 + self.optical_bands[ob_id]["served_lightpaths"].append(flow_idx) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + self.move_flow(flow_idx, slots_init, band_type, links, bidir, ob_idx) + return flow_idx, ob_id + else: + continue + print("not enough slots") + return None, 0 def extend_optical_band(self, ob_id, band=None): -- GitLab From 
29342a07d9bc19954ffeecf2628105efc12e705a Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 16 Feb 2026 00:32:57 +0000 Subject: [PATCH 16/41] Updated Telemetry Backend gNMI collector to add support for MGON power streaming. - Add skip_verify for MGON support. - Added another path - messages.py (telemetry backend) adde skip_verify --- .../gnmi_oc/GnmiOpenConfigCollector.py | 16 +++++++++------- .../service/collectors/gnmi_oc/PathMapper.py | 2 ++ src/telemetry/backend/tests/gnmi_oc/messages.py | 2 ++ .../gnmi_oc/test_unit_GnmiOpenConfigCollector.py | 15 ++++++++------- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py index 602d2f8d9..96a334b74 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py @@ -37,9 +37,10 @@ class GNMIOpenConfigCollector(_Collector): super().__init__('gNMI_openconfig_collector', address, port, **setting) self._subscriptions : Dict[str, Subscription] = {} - self.username = setting.get('username', 'admin') - self.password = setting.get('password', 'admin') - self.insecure = setting.get('insecure', True) + self.username = setting.get('username', 'admin') + self.password = setting.get('password', 'admin') + self.insecure = setting.get('insecure', True) + self.skip_verify = setting.get('skip_verify', False) # For TLS certificate verification # self.username = username # self.password = password # self.insecure = insecure @@ -58,10 +59,11 @@ class GNMIOpenConfigCollector(_Collector): """ if not self.connected: self.client = gNMIclient( - target=(self.address, self.port), - username=self.username, - password=self.password, - insecure=self.insecure + target = (self.address, self.port), + username = self.username, + password = self.password, + insecure = 
self.insecure, + skip_verify = self.skip_verify # Skip TLS certificate verification (like gnmi_subscribe_example.py) ) # self.logger.info("Connecting to gNMI target %s:%s with %s and %s", self.address, self.port, self.username, self.password) self.client.connect() # type: ignore diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py index dc467d1f9..0e1e85414 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py @@ -89,6 +89,8 @@ class PathMapper: # Uses oc-wave-router and fsmgon module prefixes to avoid origin extraction issues _WAVELENGTH_ROUTER_PREFIXES = [ 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index={endpoint}]/state/{leaf}', + # Also try without the root prefix as fallback + 'wavelength-router/fsmgon:optical-bands/optical-band[index={endpoint}]/state/{leaf}', ] # --------------------------------------------------------------# # Public helper # diff --git a/src/telemetry/backend/tests/gnmi_oc/messages.py b/src/telemetry/backend/tests/gnmi_oc/messages.py index 1b7e4f566..8a4eb6e0b 100644 --- a/src/telemetry/backend/tests/gnmi_oc/messages.py +++ b/src/telemetry/backend/tests/gnmi_oc/messages.py @@ -59,6 +59,7 @@ devices = { 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT, 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model 'endpoint': '1', + 'skip_verify': True, }, } @@ -75,6 +76,7 @@ def creat_basic_sub_request_parameters() -> dict: 'password' : device['password'], 'connect_timeout' : 15, 'insecure' : device['insecure'], + 'skip_verify' : device.get('skip_verify', True), 'mode' : 'sample', # Subscription internal mode posibly: on_change, poll, sample 'sample_interval_ns': '3s', 'sample_interval' : '10s', diff --git a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py 
b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py index ccdb6d38b..920c88c91 100644 --- a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py @@ -46,11 +46,12 @@ def sub_parameters(): def collector(sub_parameters): """Fixture to create and connect GNMI collector.""" collector = GNMIOpenConfigCollector( - username = sub_parameters['username'], - password = sub_parameters['password'], - insecure = sub_parameters['insecure'], - address = sub_parameters['target'][0], - port = sub_parameters['target'][1], + username = sub_parameters['username'], + password = sub_parameters['password'], + insecure = sub_parameters['insecure'], + address = sub_parameters['target'][0], + port = sub_parameters['target'][1], + skip_verify = sub_parameters.get('skip_verify', True), ) collector.Connect() yield collector @@ -69,7 +70,7 @@ def subscription_data(sub_parameters): "endpoint" : sub_parameters['endpoint'], "resource" : sub_parameters['resource'], }, - float(10.0), + float(20.0), float(5.0), ), ] @@ -109,7 +110,7 @@ def test_get_state_updates(collector, subscription_data): assert len(updates_received) > 0 -def test_unsubscribe_state(collector, subscription_data, kpi_manager_client, context_client): +def test_unsubscribe_state(collector, subscription_data): """Test unsubscribing from state.""" logger.info("----- Testing Unsubscribe -----") collector.SubscribeState(subscription_data) -- GitLab From 0c182e3fc96874cc6ea93f2747ce61f1c9e07ade Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Tue, 24 Feb 2026 09:25:49 +0100 Subject: [PATCH 17/41] last chnge in RSA --- src/opticalcontroller/RSA.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 182ed1ad1..dc008725b 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -871,17 +871,20 @@ class RSA(): 
self.db_flows[self.flow_id]["bitrate"] = rate self.db_flows[self.flow_id]["bidir"] = bidir + #@Chafy links, path = self.compute_path(src, dst) if len(path) < 1: self.null_values(self.flow_id) return self.flow_id op, num_slots = map_rate_to_slot(rate) + #@Chafy c_slots, l_slots, s_slots = self.get_slots(links, num_slots) if debug: print(c_slots) print(l_slots) print(s_slots) + #@Chafy if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0: flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, bidir) @@ -1189,8 +1192,6 @@ class RSA(): self.null_values(self.flow_id) return self.flow_id - - else: print("error") self.null_values(self.flow_id) -- GitLab From 2f24be4b6e60a1e691661e1869b9306a21d9c12a Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 24 Feb 2026 08:33:11 +0000 Subject: [PATCH 18/41] Update test scripts and add MGON agent support in analytics and telemetry modules --- .../run_tests_locally-analytics-backend.sh | 9 ++-- .../run_tests_locally-analytics-frontend.sh | 6 ++- scripts/run_tests_locally-telemetry-gnmi.sh | 6 +-- src/analytics/backend/service/Streamer.py | 12 ++--- .../backend/tests/messages_analyzer.py | 19 +++++++ src/analytics/frontend/tests/messages.py | 54 +++++++++++++++++++ src/analytics/frontend/tests/test_frontend.py | 26 ++++++--- .../backend/service/HelperMethods.py | 6 +-- .../service/TelemetryBackendService.py | 4 +- .../backend/service/collectors/gnmi_oc/KPI.py | 19 +++---- .../service/collectors/gnmi_oc/PathMapper.py | 16 +++--- .../backend/tests/gnmi_oc/messages.py | 8 +-- 12 files changed, 137 insertions(+), 48 deletions(-) diff --git a/scripts/run_tests_locally-analytics-backend.sh b/scripts/run_tests_locally-analytics-backend.sh index 468894271..44e26bacb 100755 --- a/scripts/run_tests_locally-analytics-backend.sh +++ b/scripts/run_tests_locally-analytics-backend.sh @@ -19,10 +19,13 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src 
RCFILE=$PROJECTDIR/coverage/.coveragerc -export KFK_SERVER_ADDRESS='127.0.0.1:9092' +export KFK_SERVER_ADDRESS='127.0.0.1:9094' CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" -python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ - analytics/backend/tests/test_backend.py +# python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ +# analytics/backend/tests/test_backend.py + +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + analytics/backend/tests/test_backend.py::test_start_analytics_backend_for_mgon_agent \ No newline at end of file diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh index 3d9fcd290..4d999f625 100755 --- a/scripts/run_tests_locally-analytics-frontend.sh +++ b/scripts/run_tests_locally-analytics-frontend.sh @@ -19,9 +19,11 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc -export KFK_SERVER_ADDRESS='127.0.0.1:9092' +export KFK_SERVER_ADDRESS='127.0.0.1:9094' CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ - analytics/frontend/tests/test_frontend.py + analytics/frontend/tests/test_frontend.py::test_StartAnalyzer_MGON_Agent +# python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ +# analytics/frontend/tests/test_frontend.py diff --git a/scripts/run_tests_locally-telemetry-gnmi.sh b/scripts/run_tests_locally-telemetry-gnmi.sh index a3a5f2b9d..965141e56 100755 --- a/scripts/run_tests_locally-telemetry-gnmi.sh +++ b/scripts/run_tests_locally-telemetry-gnmi.sh @@ -21,10 +21,10 @@ export KFK_SERVER_ADDRESS='127.0.0.1:9094' # 
This is unit test (should be tested with container-lab running) python3 -m pytest --log-level=info --log-cli-level=info --verbose \ - telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py + telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py::test_full_workflow # This is integration test (should be tested with container-lab running) -python3 -m pytest --log-level=info --log-cli-level=info --verbose \ - telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py +# python3 -m pytest --log-level=info --log-cli-level=info --verbose \ +# telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py echo "Bye!" diff --git a/src/analytics/backend/service/Streamer.py b/src/analytics/backend/service/Streamer.py index ff0b10ef5..aef093fb5 100644 --- a/src/analytics/backend/service/Streamer.py +++ b/src/analytics/backend/service/Streamer.py @@ -28,12 +28,12 @@ logger = logging.getLogger(__name__) class DaskStreamer(threading.Thread): def __init__(self, key, input_kpis, output_kpis, thresholds, - batch_size = 5, - batch_duration = None, - window_size = None, - cluster_instance = None, - producer_instance = AnalyzerHelper.initialize_kafka_producer() - ): + batch_size = 5, + batch_duration = None, + window_size = None, + cluster_instance = None, + producer_instance = AnalyzerHelper.initialize_kafka_producer() + ) -> None: super().__init__() self.key = key self.input_kpis = input_kpis diff --git a/src/analytics/backend/tests/messages_analyzer.py b/src/analytics/backend/tests/messages_analyzer.py index bed594300..963cf37e9 100644 --- a/src/analytics/backend/tests/messages_analyzer.py +++ b/src/analytics/backend/tests/messages_analyzer.py @@ -15,6 +15,25 @@ import pandas as pd from analytics.backend.service.AnalyzerHandlers import Handlers +def create_analysis_request_message_for_mgon_agent(): + return { + "request_id": "test_request_mgon_001", + "oper_mode": "test_mode", + "input_kpi_list": ["6e22f180-ba28-4641-b190-2287bf448888"], + 
"output_kpi_list": ["6e22f180-ba28-4641-b190-2287bf181818"], + "task_type": Handlers.AGGREGATION_HANDLER.value, + "task_parameter": { + "avg": [-5, -20], + }, + "duration": 90, + "batch_duration": 20, + "window_size": None, + "batch_size": 5, + "interval": 5, + } + + + def get_input_kpi_list(): return ["1e22f180-ba28-4641-b190-2287bf446666", "6e22f180-ba28-4641-b190-2287bf448888", 'kpi_3'] diff --git a/src/analytics/frontend/tests/messages.py b/src/analytics/frontend/tests/messages.py index 4dc5b96b8..563de0592 100644 --- a/src/analytics/frontend/tests/messages.py +++ b/src/analytics/frontend/tests/messages.py @@ -18,6 +18,8 @@ from common.proto.kpi_manager_pb2 import KpiId from common.proto.analytics_frontend_pb2 import ( AnalyzerOperationMode, AnalyzerId, Analyzer, AnalyzerFilter ) +# function to create analyzer based on + def create_analyzer_id(): _create_analyzer_id = AnalyzerId() _create_analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) @@ -119,6 +121,58 @@ def create_analyzer_filter(): return _create_analyzer_filter +def create_analyzer_for_mgon_agent(): + """ + Create analyzer for MGON agent test with aggregation handler. 
+ Returns: + Analyzer: Configured analyzer for MGON agent testing + """ + _create_analyzer = Analyzer() + + # Set analyzer ID + _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) + + # Set algorithm name and operation mode + _create_analyzer.algorithm_name = "Test_MGON_Aggregation" + _create_analyzer.operation_mode = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING + + # Input KPI ID + _input_kpi_id = KpiId() + _input_kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" + _create_analyzer.input_kpi_ids.append(_input_kpi_id) + + # Output KPI ID + _output_kpi_id = KpiId() + _output_kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf181818" + _create_analyzer.output_kpi_ids.append(_output_kpi_id) + + # Task parameters - aggregation with average threshold + _threshold_dict = { + "task_type": Handlers.AGGREGATION_HANDLER.value, + "task_parameter": { + "avg": [-5, -10], + }, + } + + _create_analyzer.parameters['thresholds'] = json.dumps(_threshold_dict) + _create_analyzer.parameters['window_size'] = "0" # No sliding window + _create_analyzer.parameters['window_slider'] = "0" + _create_analyzer.parameters['store_aggregate'] = str(False) + _create_analyzer.parameters['interval'] = "5" # Interval for batch processing + + # Duration of the analyzer + _create_analyzer.duration_s = 900 + + # Batch duration settings + _create_analyzer.batch_min_duration_s = 20 + _create_analyzer.batch_max_duration_s = 20 + + # Batch size settings + _create_analyzer.batch_min_size = 5 + _create_analyzer.batch_max_size = 5 + + return _create_analyzer + # Added for testing to remove the dependency on the backend service from enum import Enum diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py index b49062a81..bc210889a 100644 --- a/src/analytics/frontend/tests/test_frontend.py +++ b/src/analytics/frontend/tests/test_frontend.py @@ -28,7 +28,7 @@ from common.tools.kafka.Variables import KafkaTopic from 
common.proto.analytics_frontend_pb2 import AnalyzerId, AnalyzerList from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService -from analytics.frontend.tests.messages import ( create_analyzer_id, create_analyzer, +from analytics.frontend.tests.messages import ( create_analyzer_for_mgon_agent, create_analyzer_id, create_analyzer, create_analyzer_filter ) from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl from apscheduler.schedulers.background import BackgroundScheduler @@ -38,15 +38,13 @@ from apscheduler.triggers.interval import IntervalTrigger ########################### # Tests Setup ########################### - +LOGGER = logging.getLogger(__name__) LOCAL_HOST = '127.0.0.1' ANALYTICS_FRONTEND_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.ANALYTICS)) os.environ[get_env_var_name(ServiceNameEnum.ANALYTICS, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.ANALYTICS, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(ANALYTICS_FRONTEND_PORT) -LOGGER = logging.getLogger(__name__) - @pytest.fixture(scope='session') def analyticsFrontend_service(): LOGGER.info('Initializing AnalyticsFrontendService...') @@ -93,10 +91,10 @@ def log_all_methods(request): ########################### # --- "test_validate_kafka_topics" should be executed before the functionality tests --- -def test_validate_kafka_topics(): - LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") - response = KafkaTopic.create_all_topics() - assert isinstance(response, bool) +# def test_validate_kafka_topics(): +# LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") +# response = KafkaTopic.create_all_topics() +# assert isinstance(response, bool) # To test start and stop listener together def test_StartAnalyzers(analyticsFrontend_client): @@ -133,3 +131,15 @@ def 
test_StartAnalyzers(analyticsFrontend_client): # for response in class_obj.StartResponseListener(analyzer_id.analyzer_id.uuid): # LOGGER.debug(response) # assert isinstance(response, tuple) + +# Additional tests can be added below following the same structure +# --- MGON Agent Tests --- +def test_StartAnalyzer_MGON_Agent(analyticsFrontend_client): + added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer_for_mgon_agent()) + LOGGER.debug(str(added_analyzer_id)) + LOGGER.info("waiting for timer to complete 3000 seconds ...") + time.sleep(3000) + LOGGER.info('--> StopAnalyzer after timer completion') + response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id) + LOGGER.debug(str(response)) +# --- End of MGON Agent Tests --- diff --git a/src/telemetry/backend/service/HelperMethods.py b/src/telemetry/backend/service/HelperMethods.py index 19107be6b..5ba57dc7b 100644 --- a/src/telemetry/backend/service/HelperMethods.py +++ b/src/telemetry/backend/service/HelperMethods.py @@ -107,9 +107,9 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri if the KPI ID is not found or the collector cannot be created. 
""" LOGGER.info(f"Getting collector for KPI ID: {kpi_id}") - kpi_id_obj = KpiId() + kpi_id_obj = KpiId() kpi_id_obj.kpi_id.uuid = kpi_id # pyright: ignore[reportAttributeAccessIssue] - kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) + kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) # LOGGER.info(f"KPI Descriptor: {kpi_descriptor}") if not kpi_descriptor: raise Exception(f"KPI ID: {kpi_id} - Descriptor not found.") #TODO: Change to TFS NotFoundException @@ -122,7 +122,7 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri ) # Getting device collector (testing) - collector : _Collector = get_driver(driver_instance_cache, device) + collector : _Collector = get_driver(driver_instance_cache, device) # NOTE: driver_instance_cache is define in collector_api.DriverInstanceCache if collector is None: raise Exception(f"KPI ID: {kpi_id} - Collector not found for device {device.device_uuid.uuid}.") #TODO: Change to TFS NotFoundException # LOGGER.info(f"Collector for KPI ID: {kpi_id} - {collector.__class__.__name__}") diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index 28b0c3989..13badc323 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -135,7 +135,7 @@ class TelemetryBackendService(GenericGrpcService): """ Method to handle collector request. """ - # CONFIRM: The method (get_collector_by_kpi_id) is working correctly. testcase in integration tests. + # CONFIRMED: The method (get_collector_by_kpi_id) is working correctly. testcase in integration tests. 
self.device_collector = get_collector_by_kpi_id( kpi_id, self.kpi_manager_client, self.context_client, self.driver_instance_cache) @@ -144,7 +144,7 @@ class TelemetryBackendService(GenericGrpcService): raise Exception(f"KPI ID: {kpi_id} - Collector not found.") LOGGER.info(("----- Number done -----")) - # CONFIRM: The method (get_subscription_parameters) is working correctly. testcase in telemetery backend tests. + # CONFIRMED: The method (get_subscription_parameters) is working correctly. testcase in telemetery backend tests. resource_to_subscribe = get_subscription_parameters( kpi_id, self.kpi_manager_client, self.context_client, duration, interval ) diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py index 4a57ab8dc..0e902fa9c 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py @@ -16,14 +16,15 @@ from enum import IntEnum, unique @unique -class KPI(IntEnum): # TODO: verify KPI names and codes with KPI proto file. (How many TFS supports) +class KPI(IntEnum): + # TODO: verify KPI names and codes with KPI proto file. 
(How many TFS supports) """Generic KPI codes that map to interface statistics.""" - PACKETS_TRANSMITTED = 101 - PACKETS_RECEIVED = 102 - PACKETS_DROPPED = 103 - BYTES_TRANSMITTED = 201 - BYTES_RECEIVED = 202 - BYTES_DROPPED = 203 - INBAND_POWER = 301 - KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 302 + KPISAMPLETYPE_PACKETS_TRANSMITTED = 101 + KPISAMPLETYPE_PACKETS_RECEIVED = 102 + KPISAMPLETYPE_PACKETS_DROPPED = 103 + KPISAMPLETYPE_BYTES_TRANSMITTED = 201 + KPISAMPLETYPE_BYTES_RECEIVED = 202 + KPISAMPLETYPE_BYTES_DROPPED = 203 + KPISAMPLETYPE_INBAND_POWER = 301 + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 302 # TODO: Add more KPIs as needed, diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py index 0e1e85414..fb1d54952 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py @@ -39,24 +39,24 @@ class PathMapper: # in the future. The list is not exhaustive, but it covers the most common cases # across OpenConfig implementations. The collector will try each until one succeeds. 
# ---- packets --------------------------------------------------- - KPI.PACKETS_TRANSMITTED: [ + KPI.KPISAMPLETYPE_PACKETS_TRANSMITTED: [ "out-pkts", "out-unicast-pkts", "tx-pkts", "packets-output" ], - KPI.PACKETS_RECEIVED: [ + KPI.KPISAMPLETYPE_PACKETS_RECEIVED: [ "in-pkts", "in-unicast-pkts", "rx-pkts", "packets-input" ], - KPI.PACKETS_DROPPED: [ + KPI.KPISAMPLETYPE_PACKETS_DROPPED: [ "in-discards", "out-discards", "packets-drop" ], # ---- bytes ----------------------------------------------------- - KPI.BYTES_TRANSMITTED: [ + KPI.KPISAMPLETYPE_BYTES_TRANSMITTED: [ "out-octets", "tx-octets", "bytes-output" ], - KPI.BYTES_RECEIVED: [ + KPI.KPISAMPLETYPE_BYTES_RECEIVED: [ "in-octets", "rx-octets", "bytes-input" ], - KPI.BYTES_DROPPED: [ + KPI.KPISAMPLETYPE_BYTES_DROPPED: [ "in-octets-discarded", "out-octets-discarded", "bytes-drop" ], @@ -64,13 +64,13 @@ class PathMapper: # Note: Inband power is not a standard leaf in OpenConfig, but # it is included here for completeness. The actual leaf names # may vary by implementation. 
- KPI.INBAND_POWER: [ + KPI.KPISAMPLETYPE_INBAND_POWER: [ "inband-power", "inband-power-state" ], # ---- total power (optical wavelength router) ---------------- # For optical devices using FlexScale MGON YANG model - KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT : [ + KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER : [ "optical-power-total-input/instant", ], } diff --git a/src/telemetry/backend/tests/gnmi_oc/messages.py b/src/telemetry/backend/tests/gnmi_oc/messages.py index 8a4eb6e0b..7a6a9baf0 100644 --- a/src/telemetry/backend/tests/gnmi_oc/messages.py +++ b/src/telemetry/backend/tests/gnmi_oc/messages.py @@ -26,7 +26,7 @@ devices = { 'username': 'ocnos', 'password': 'ocnos', 'insecure': True, - 'kpi' : KPI.PACKETS_RECEIVED, + 'kpi' : KPI.KPISAMPLETYPE_PACKETS_RECEIVED, 'resource': 'interface', 'endpoint': 'Management0', }, @@ -36,7 +36,7 @@ devices = { 'username': 'ocnos', 'password': 'ocnos', 'insecure': True, - 'kpi' : KPI.PACKETS_RECEIVED, + 'kpi' : KPI.KPISAMPLETYPE_PACKETS_RECEIVED, 'resource': 'interface', 'endpoint': 'Management0', }, @@ -46,7 +46,7 @@ devices = { 'username': 'admin', 'password': 'admin', 'insecure': True, - 'kpi' : KPI.PACKETS_RECEIVED, + 'kpi' : KPI.KPISAMPLETYPE_PACKETS_RECEIVED, 'resource': 'interface', 'endpoint': 'Management0', }, @@ -56,7 +56,7 @@ devices = { 'username': 'admin', 'password': 'admin', 'insecure': True, - 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model 'endpoint': '1', 'skip_verify': True, -- GitLab From 1d230e865a4692900257a70332d7855ee4f5461e Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Wed, 25 Feb 2026 23:56:59 +0100 Subject: [PATCH 19/41] base for optical automation --- .gitignore | 2 + manifests/telemetryservice.yaml | 2 +- proto/kpi_sample_types.proto | 1 + .../gnmi_oc/GnmiOpenConfigCollector.py | 6 +- .../collectors/gnmi_oc/SubscriptionNew.py | 8 +- 
.../backend/tests/gnmi_oc/messages.py | 16 +- .../test_unit_GnmiOpenConfigCollector.py | 2 +- .../automation/descriptors/automation.json | 10 +- src/tests/ofc26_flexscale/Fixtures.py | 63 +++++ src/tests/ofc26_flexscale/__init__.py | 14 + .../ofc26_flexscale/mock_tfs_services.py | 74 ++++++ src/tests/ofc26_flexscale/my_deploy_ofc26.sh | 240 ++++++++++++++++++ src/tests/ofc26_flexscale/run_ofc26_test.sh | 25 ++ .../ofc26_flexscale/telemetry_service.py | 0 .../ofc26_flexscale/test_ofc26_messages.py | 66 +++++ .../test_ofc26_mgon_integration.py | 116 +++++++++ 16 files changed, 623 insertions(+), 22 deletions(-) create mode 100644 src/tests/ofc26_flexscale/Fixtures.py create mode 100644 src/tests/ofc26_flexscale/__init__.py create mode 100644 src/tests/ofc26_flexscale/mock_tfs_services.py create mode 100755 src/tests/ofc26_flexscale/my_deploy_ofc26.sh create mode 100755 src/tests/ofc26_flexscale/run_ofc26_test.sh create mode 100644 src/tests/ofc26_flexscale/telemetry_service.py create mode 100644 src/tests/ofc26_flexscale/test_ofc26_messages.py create mode 100644 src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py diff --git a/.gitignore b/.gitignore index d5af4f7f6..1b9e692a3 100644 --- a/.gitignore +++ b/.gitignore @@ -177,6 +177,8 @@ cython_debug/ # Sqlite *.db +#temp files to test telemetry +src/telemetry/backend/tempFiles/ # TeraFlowSDN-generated files tfs_runtime_env_vars.sh tfs_runtime_env_vars*.sh diff --git a/manifests/telemetryservice.yaml b/manifests/telemetryservice.yaml index 2add96516..935267250 100644 --- a/manifests/telemetryservice.yaml +++ b/manifests/telemetryservice.yaml @@ -95,7 +95,7 @@ metadata: app: telemetryservice spec: type: LoadBalancer - loadBalancerIP: 192.168.5.250 + loadBalancerIP: _LOAD_BALANCER_IP_ externalTrafficPolicy: Local selector: app: telemetryservice diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto index 19cd59f15..a8e25e809 100644 --- a/proto/kpi_sample_types.proto +++ 
b/proto/kpi_sample_types.proto @@ -32,6 +32,7 @@ enum KpiSampleType { KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. can be used by both optical and L3 without any issue KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602; diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py index 60364c4cd..63556d4e9 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py @@ -33,7 +33,7 @@ class GNMIOpenConfigCollector(_Collector): ========================= Lightweight wrapper around *pygnmi* with subscribe / get / unsubscribe helpers. """ - def __init__(self, address: str = '', port: int = -1, **setting) -> None: + def __init__(self, address: str = "", port: int = -1, **setting) -> None: super().__init__('gNMI_openconfig_collector', address, port, **setting) self._subscriptions : Dict[str, Subscription] = {} @@ -51,13 +51,15 @@ class GNMIOpenConfigCollector(_Collector): self._output_queue = queue.Queue() # Queue for telemetry updates self.logger = logging.getLogger(__name__) - self.logger.debug("GNMICollector instantiated.") + self.logger.info("GNMICollector instantiated.") def Connect(self) -> bool: """ Connect to the gNMI target device. 
""" + self.logger.info("Connecting to gNMI target %s:%s with username '%s'", + self.address, self.port, self.username) if not self.connected: self.client = gNMIclient( target = (self.address, self.port), diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py index bbbc9fdb2..c48ba505c 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py @@ -229,7 +229,7 @@ class Subscription: LOGGER.debug("Sub %s stop requested, breaking stream loop", self.sub_id) break - LOGGER.debug("Sub %s received stream message: %s", self.sub_id, stream) + LOGGER.info("Sub %s received stream message: %s", self.sub_id, stream) # DEBUG: Check if update has actual update messages if stream.HasField("update"): @@ -269,12 +269,10 @@ class Subscription: LOGGER.debug("Sub %s cancelled (channel closed) - graceful shutdown", self.sub_id) break elif err.code() == grpc.StatusCode.INVALID_ARGUMENT: - LOGGER.warning("Path '%s' rejected (%s) -- trying next", - path, err.details()) + LOGGER.warning("Path '%s' rejected (%s) -- trying next", path, err.details()) continue else: - LOGGER.exception("Subscription %s hit gRPC error: %s", # Change with TFS Exception - self.sub_id, err) + LOGGER.exception("Subscription %s hit gRPC error: %s", self.sub_id, err) # Change with TFS Exception break except Exception as exc: # pylint: disable=broad-except diff --git a/src/telemetry/backend/tests/gnmi_oc/messages.py b/src/telemetry/backend/tests/gnmi_oc/messages.py index 7a6a9baf0..9cad7edef 100644 --- a/src/telemetry/backend/tests/gnmi_oc/messages.py +++ b/src/telemetry/backend/tests/gnmi_oc/messages.py @@ -51,14 +51,14 @@ devices = { 'endpoint': 'Management0', }, 'mgon': { - 'host' : 'localhost', - 'port' : '50061', - 'username': 'admin', - 'password': 'admin', - 'insecure': True, - 'kpi' : 
KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, - 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model - 'endpoint': '1', + 'host' : '172.17.254.24', + 'port' : '50061', + 'username' : 'admin', + 'password' : 'admin', + 'insecure' : True, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, + 'resource' : 'wavelength-router', #TODO: verify resource name form mg-on model + 'endpoint' : '1', 'skip_verify': True, }, } diff --git a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py index 920c88c91..c08f196d1 100644 --- a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py @@ -70,7 +70,7 @@ def subscription_data(sub_parameters): "endpoint" : sub_parameters['endpoint'], "resource" : sub_parameters['resource'], }, - float(20.0), + float(60.0), float(5.0), ), ] diff --git a/src/tests/automation/descriptors/automation.json b/src/tests/automation/descriptors/automation.json index 5876de4ae..d8c36cac0 100644 --- a/src/tests/automation/descriptors/automation.json +++ b/src/tests/automation/descriptors/automation.json @@ -1,19 +1,19 @@ { "target_service_id": { - "service_uuid": {"uuid": "66d498ad-5d94-5d90-8cb4-861e30689c64"}, + "service_uuid": {"uuid": "b2a60c5b-8c46-5707-a64a-9c6539d395f2"}, "context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}} }, "telemetry_service_id": { - "service_uuid": {"uuid": "db73d789-4abc-5514-88bb-e21f7e31d36a"}, + "service_uuid": {"uuid": "7397bdf2-eec8-57f4-9406-4f9e9f3dc50e"}, "context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}} }, "analyzer":{ "operation_mode": "ANALYZEROPERATIONMODE_STREAMING", "parameters": { - "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"avg\": [0, 2500]}]}" + "thresholds": "{\"task_type\": 
\"AggregationHandler\",\"task_parameter\": [ {\"avg\": [-5, -15]}]}" }, "input_kpi_ids": [ - {"kpi_id": { "uuid": "b9f915e2-402d-4788-9e7d-6bd1055b5e8b"}} + {"kpi_id": { "uuid": "6e22f180-ba28-4641-b190-2287bf447777"}} ], "output_kpi_ids": [ {"kpi_id": { "uuid": "c45b09d8-c84a-45d8-b4c2-9fa9902d157d"}} @@ -24,7 +24,7 @@ "policy":{ "serviceId": { "context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, - "service_uuid": {"uuid": "66d498ad-5d94-5d90-8cb4-861e30689c64"} + "service_uuid": {"uuid": "b2a60c5b-8c46-5707-a64a-9c6539d395f2"} }, "policyRuleBasic": { "actionList": [ diff --git a/src/tests/ofc26_flexscale/Fixtures.py b/src/tests/ofc26_flexscale/Fixtures.py new file mode 100644 index 000000000..c5b66b858 --- /dev/null +++ b/src/tests/ofc26_flexscale/Fixtures.py @@ -0,0 +1,63 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import logging + +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from kpi_manager.client.KpiManagerClient import KpiManagerClient + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(host="10.152.183.180") + _client.connect() + LOGGER.info('Yielding Connected ContextClient...') + yield _client + LOGGER.info('Closing ContextClient...') + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(host="10.152.183.212") + _client.connect() + LOGGER.info('Yielding Connected DeviceClient...') + yield _client + LOGGER.info('Closing DeviceClient...') + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient(host="10.152.183.98") + _client.connect() + LOGGER.info('Yielding Connected ServiceClient...') + yield _client + LOGGER.info('Closing ServiceClient...') + _client.close() + +@pytest.fixture(scope='session') +def kpi_manager_client(): + _client = KpiManagerClient(host="10.152.183.121") + _client.connect() + LOGGER.info('Yielding Connected KpiManagerClient...') + yield _client + LOGGER.info('Closed KpiManagerClient...') + _client.close() + diff --git a/src/tests/ofc26_flexscale/__init__.py b/src/tests/ofc26_flexscale/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/ofc26_flexscale/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ofc26_flexscale/mock_tfs_services.py b/src/tests/ofc26_flexscale/mock_tfs_services.py new file mode 100644 index 000000000..6cbacd610 --- /dev/null +++ b/src/tests/ofc26_flexscale/mock_tfs_services.py @@ -0,0 +1,74 @@ + + + +import os, pytest +import logging +from typing import Union + +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) +from common.tests.MockServicerImpl_Context import MockServicerImpl_Context +from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server + +from common.tools.service.GenericGrpcService import GenericGrpcService + +from kpi_manager.service.KpiManagerService import KpiManagerService +from kpi_manager.client.KpiManagerClient import KpiManagerClient + + + +LOGGER = logging.getLogger(__name__) + +LOCAL_HOST = '127.0.0.1' + +KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) # type: ignore +os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT) + + +# NOTE: ---> For local testing, without need of running TFS services. 
+ +class MockContextService(GenericGrpcService): + # Mock Service implementing Context to simplify unitary tests of Monitoring + + def __init__(self, bind_port: Union[str, int]) -> None: + super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') + + # pylint: disable=attribute-defined-outside-init + def install_servicers(self): + self.context_servicer = MockServicerImpl_Context() + add_ContextServiceServicer_to_server(self.context_servicer, self.server) + +# This fixture will be requested by test cases and last during testing session +@pytest.fixture(scope='session') +def kpi_manager_service(): + LOGGER.info('Initializing KpiManagerService...') + _service = KpiManagerService() + _service.start() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding KpiManagerService...') + yield _service + + LOGGER.info('Terminating KpiManagerService...') + _service.stop() + + LOGGER.info('Terminated KpiManagerService...') + +# This fixture will be requested by test cases and last during testing session. +# The client requires the server, so client fixture has the server as dependency. 
+# def monitoring_client(monitoring_service : MonitoringService): (Add for better understanding) +@pytest.fixture(scope='session') +def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disable=redefined-outer-name,unused-argument + LOGGER.info('Initializing KpiManagerClient...') + _client = KpiManagerClient() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding KpiManagerClient...') + yield _client + + LOGGER.info('Closing KpiManagerClient...') + _client.close() + + LOGGER.info('Closed KpiManagerClient...') \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/my_deploy_ofc26.sh b/src/tests/ofc26_flexscale/my_deploy_ofc26.sh new file mode 100755 index 000000000..540a57646 --- /dev/null +++ b/src/tests/ofc26_flexscale/my_deploy_ofc26.sh @@ -0,0 +1,240 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. 
+export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate OSM Client +#export 
TFS_COMPONENTS="${TFS_COMPONENTS} osm_client" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +#export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + +# Uncomment to activate Pluggables Component +#export TFS_COMPONENTS="${TFS_COMPONENTS} pluggables" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. 
+export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. 
This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- Time Series Storage - Prometheus / Grafana Mimir ----------------------- + +# Set Time Series Storage installation mode to 'single' (i.e., Prometheus only). +# This option is convenient for development and testing. See ./deploy/all.sh or +# ./deploy/monitoring.sh for additional details. +export TSDB_DEPLOY_MODE="single" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. 
+export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Telemetry Config ------------------------------------------------------ + +# Define a Load Balancer IP for Telemetry Collector components +export LOAD_BALANCER_IP="192.168.5.250" # <-- Change this to match your network diff --git a/src/tests/ofc26_flexscale/run_ofc26_test.sh b/src/tests/ofc26_flexscale/run_ofc26_test.sh new file mode 100755 index 000000000..026e56876 --- /dev/null +++ b/src/tests/ofc26_flexscale/run_ofc26_test.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0)/../../../ +echo "Running OFC26 test from folder: $(pwd)" +cd src/ +CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" + + +python -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ + tests/ofc26_flexscale/test_ofc26_mgon_integration.py diff --git a/src/tests/ofc26_flexscale/telemetry_service.py b/src/tests/ofc26_flexscale/telemetry_service.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py b/src/tests/ofc26_flexscale/test_ofc26_messages.py new file mode 100644 index 000000000..c142c0646 --- /dev/null +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -0,0 +1,66 @@ + + +import uuid +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI + + +# ---> KPI Manager messages creation for testing + +def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER + _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS + _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" + # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + # _create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) + # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return 
_create_kpi_request + + +# ---> Telemetry messages creation for testing + +devices = { + 'mgon': { + 'host' : '172.17.254.24', + 'port' : '50061', + 'username': 'admin', + 'password': 'admin', + 'insecure': True, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, + #'resource': 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=4]/state/optical-power-total-input/instant', + 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model + 'endpoint': '10', + }, +} + +def create_basic_sub_request_parameters() -> dict: + + device = devices['mgon'] + if device: + kpi = device['kpi'] + resource = device['resource'] + endpoint = device['endpoint'] + return { + 'host' : device['host'], + 'port' : device['port'], + 'username' : device['username'], + 'password' : device['password'], + 'connect_timeout' : 15, + 'insecure' : device['insecure'], + 'mode' : 'sample', # Subscription internal mode posibly: on_change, poll, sample + 'sample_interval' : 10, # This should be in seconds units + 'duration' : 300.0, # Duration in seconds for how long to receive samples + 'kpi' : kpi, + 'resource' : resource, + 'endpoint' : endpoint, + } + return {} + + +# oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=1]/state/optical-power-total-input/instant +# oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=1]/state/optical-power-total-input/instant diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py new file mode 100644 index 000000000..1f9e6a982 --- /dev/null +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py @@ -0,0 +1,116 @@ + + +import logging + +from common.proto.kpi_manager_pb2 import KpiId +import time + +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +from tests.ofc26_flexscale.test_ofc26_messages import create_kpi_descriptor_request +from 
telemetry.backend.service.collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector +from src.tests.ofc26_flexscale.test_ofc26_messages import create_basic_sub_request_parameters + +WITH_TFS = True #True/False +if WITH_TFS: + from .Fixtures import kpi_manager_client +else: + from .mock_tfs_services import kpi_manager_client + +LOGGER = logging.getLogger(__name__) + + +def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER + _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS + _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" + # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + # _create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) + # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return _create_kpi_request + + +def test_Complete_MGON_Integration(kpi_manager_client): + + # 1. KPI Descriptor Creation + LOGGER.info(" >>> test_Complete_MGON_Integration: START <<< ") + kpi_descriptor_obj = create_kpi_descriptor_request() + _search_kpi_id = kpi_manager_pb2.KpiId() + _search_kpi_id = kpi_descriptor_obj.kpi_id + response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) + if isinstance(response, kpi_manager_pb2.KpiDescriptor): + LOGGER.info("KPI Descriptor already exists with ID: %s. Attempting to delete it.", _search_kpi_id.kpi_id.uuid) + else: + LOGGER.info("No existing KPI Descriptor found with ID: %s. 
Proceeding to create it.", _search_kpi_id.kpi_id.uuid) + response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) + + + # 2. Telemetry Collector Creation + sub_parameters = create_basic_sub_request_parameters() + LOGGER.info("Subscription parameters: %s", sub_parameters) + + collector = GNMIOpenConfigCollector( + address = sub_parameters.get('host', ''), + port = sub_parameters.get('port', -1), + username = sub_parameters.get('username', None), + password = sub_parameters.get('password', None), + insecure = sub_parameters.get('insecure', None), + skip_verify = sub_parameters.get('skip_verify', True), + ) + if not collector.Connect(): + LOGGER.error("Failed to connect to the collector") + return + + LOGGER.info("----- Testing State Subscription -----") + + sub_data = [( + "x123", + { + "kpi" : sub_parameters['kpi'], + "endpoint" : sub_parameters['endpoint'], + "resource" : sub_parameters['resource'], + }, + sub_parameters['duration'], + sub_parameters['sample_interval'], + ),] + + response = collector.SubscribeState(sub_data) + if response is None: + LOGGER.error("Subscription failed.") + return + + LOGGER.info("Subscription started: Status: %s, Data: %s", response, sub_data) + + test_get_state_updates(collector, sub_data) + + LOGGER.info("Sleeping...") + time.sleep(600) + LOGGER.info("Done sleeping.") + LOGGER.info(" >>> test_Complete_MGON_Integration: END <<< ") + + +def test_get_state_updates(collector, subscription_data): + """Test getting state updates.""" + LOGGER.info("----- Testing State Updates -----") + collector.SubscribeState(subscription_data) + + LOGGER.info("Requesting state updates for 300 seconds ...") + updates_received = [] + for samples in collector.GetState(duration=300, blocking=True): + LOGGER.info("Received state update: %s", samples) + updates_received.append(samples) + + assert len(updates_received) > 0 + + +if __name__ == 
"__main__": + test_Complete_MGON_Integration(kpi_manager_client) + + -- GitLab From f4fe80d27a69df0a507cea32152aa4f067546786 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Thu, 26 Feb 2026 14:28:53 +0100 Subject: [PATCH 20/41] working closed loop --- .../service/AutomationServiceServicerImpl.py | 7 +- .../zsm_handler_api/ZSMFilterFields.py | 4 +- .../service/zsm_handlers/P4INTZSMPlugin.py | 6 +- .../service/zsm_handlers/__init__.py | 4 +- src/common/tools/kafka/Variables.py | 2 +- src/policy/src/main/resources/application.yml | 2 +- src/policy/target/kubernetes/kubernetes.yml | 2 +- .../service/ServiceServiceServicerImpl.py | 5 +- .../backend/service/HelperMethods.py | 40 ++++++ .../service/TelemetryBackendService.py | 58 ++++++-- .../gnmi_oc/GnmiOpenConfigCollector.py | 4 +- .../automation/descriptors/automation.json | 2 +- src/tests/automation/run_test_automation.sh | 0 .../automation/test_functional_automation.py | 1 + src/tests/ofc26_flexscale/Fixtures.py | 17 ++- src/tests/ofc26_flexscale/run_ofc26_test.sh | 10 +- .../ofc26_flexscale/test_ofc26_messages.py | 13 +- .../test_ofc26_mgon_integration.py | 17 +-- .../test_ofc26_mgon_integration_V2.py | 127 ++++++++++++++++++ 19 files changed, 278 insertions(+), 43 deletions(-) mode change 100644 => 100755 src/tests/automation/run_test_automation.sh create mode 100644 src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py diff --git a/src/automation/service/AutomationServiceServicerImpl.py b/src/automation/service/AutomationServiceServicerImpl.py index 5c31b11bd..11b4d5e6a 100644 --- a/src/automation/service/AutomationServiceServicerImpl.py +++ b/src/automation/service/AutomationServiceServicerImpl.py @@ -42,6 +42,8 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): targetService = context_client.GetService(request.target_service_id) telemetryService = context_client.GetService(request.telemetry_service_id) + LOGGER.info(f"Target service type: {targetService.service_type}") + 
LOGGER.info(f"Telemetry service type: {telemetryService.service_type}") handler_cls = self.get_service_handler_based_on_service_types( targetService.service_type, telemetryService.service_type, ZSM_SERVICE_HANDLERS @@ -98,10 +100,12 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): return ZSMService() def get_service_handler_based_on_service_types( - self, targetServiceType ,telemetryServiceType , ZSM_SERVICE_HANDLERS + self, targetServiceType, telemetryServiceType, ZSM_SERVICE_HANDLERS ): flag = True for handler_cls, filters in ZSM_SERVICE_HANDLERS: + LOGGER.info(f"Handler: {handler_cls}") # <<--P4INTZSMPlugin + LOGGER.info(f"Filters: {filters}") # <--ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY for filter in filters: flag = self.check_if_requested_services_pass_filter_criteria( filter, targetServiceType, telemetryServiceType @@ -115,6 +119,7 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): ): flag = True for filter_key, filter_value in filter.items(): + LOGGER.info(f"Filter value: {filter_value}") # <--ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY if filter_value in ZSM_FILTER_FIELD_ALLOWED_VALUES[filter_key.value]: if filter_key.value == ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value: if filter_value != targetServiceType: diff --git a/src/automation/service/zsm_handler_api/ZSMFilterFields.py b/src/automation/service/zsm_handler_api/ZSMFilterFields.py index 7b00de5bc..ab91e1bc5 100644 --- a/src/automation/service/zsm_handler_api/ZSMFilterFields.py +++ b/src/automation/service/zsm_handler_api/ZSMFilterFields.py @@ -20,11 +20,11 @@ class ZSMFilterFieldEnum(Enum): TELEMETRY_SERVICE_TYPE = 'telemetry_service_type' TARGET_SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_L2NM + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY } TELEMETRY_SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_INT + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY } # Maps filter fields to allowed values per Filter field. 
# If no restriction (free text) None is specified diff --git a/src/automation/service/zsm_handlers/P4INTZSMPlugin.py b/src/automation/service/zsm_handlers/P4INTZSMPlugin.py index 74694a376..f3ea519a6 100644 --- a/src/automation/service/zsm_handlers/P4INTZSMPlugin.py +++ b/src/automation/service/zsm_handlers/P4INTZSMPlugin.py @@ -74,9 +74,9 @@ class P4INTZSMPlugin(_ZSMHandler): LOGGER.exception(ex.code()) # ToDo: Investigate why PolicyAddService throws exception # if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member - context_client.close() - policy_client.close() - return self._zsm_create_response_empty() + # context_client.close() + # policy_client.close() + # return self._zsm_create_response_empty() context_client.close() analytics_frontend_client.close() diff --git a/src/automation/service/zsm_handlers/__init__.py b/src/automation/service/zsm_handlers/__init__.py index dcb533e61..abb5588f8 100644 --- a/src/automation/service/zsm_handlers/__init__.py +++ b/src/automation/service/zsm_handlers/__init__.py @@ -19,8 +19,8 @@ from automation.service.zsm_handlers.P4INTZSMPlugin import P4INTZSMPlugin ZSM_SERVICE_HANDLERS = [ (P4INTZSMPlugin, [ { - ZSMFilterFieldEnum.TARGET_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, - ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT, + ZSMFilterFieldEnum.TARGET_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, + ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, } ]) ] diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index 5a8e68215..dd3d9e07b 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -19,7 +19,7 @@ from kafka.errors import TopicAlreadyExistsError from common.Settings import get_setting LOGGER = logging.getLogger(__name__) -KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}' +KFK_SERVER_ADDRESS_TEMPLATE = 
'kafka-public.{:s}.svc.cluster.local:{:s}' KAFKA_TOPIC_NUM_PARTITIONS = 1 KAFKA_TOPIC_REPLICATION_FACTOR = 1 diff --git a/src/policy/src/main/resources/application.yml b/src/policy/src/main/resources/application.yml index ccfbffdf5..7292f222a 100644 --- a/src/policy/src/main/resources/application.yml +++ b/src/policy/src/main/resources/application.yml @@ -63,7 +63,7 @@ quarkus: context-service-host: "contextservice" monitoring-service-host: "monitoringservice" service-service-host: "serviceservice" - kafka-broker-host: "kafka-service.kafka.svc.cluster.local" + kafka-broker-host: "kafka-public.kafka.svc.cluster.local" resources: requests: cpu: 50m diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml index cd578b34b..a83d03403 100644 --- a/src/policy/target/kubernetes/kubernetes.yml +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -76,7 +76,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: KAFKA_BROKER_HOST - value: kafka-service.kafka.svc.cluster.local + value: kafka-public.kafka.svc.cluster.local - name: CONTEXT_SERVICE_HOST value: contextservice - name: MONITORING_SERVICE_HOST diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 98b0a2670..03bd27fc3 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -542,6 +542,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecomputeConnections(self, request : Service, context : grpc.ServicerContext) -> Empty: + if len(request.service_endpoint_ids) > 0: raise NotImplementedException('update-endpoints') @@ -549,7 +550,9 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): raise NotImplementedException('update-constraints') if len(request.service_config.config_rules) > 0: - raise NotImplementedException('update-config-rules') + del 
request.service_config.config_rules[:] + # raise NotImplementedException('update-config-rules') + LOGGER.error('update-config-rules not implemented') context_client = ContextClient() diff --git a/src/telemetry/backend/service/HelperMethods.py b/src/telemetry/backend/service/HelperMethods.py index 2eda93c8e..d0a68c8e8 100644 --- a/src/telemetry/backend/service/HelperMethods.py +++ b/src/telemetry/backend/service/HelperMethods.py @@ -22,6 +22,9 @@ from common.proto.kpi_manager_pb2 import KpiId from common.tools.context_queries.Device import get_device from common.tools.context_queries.EndPoint import get_endpoint_names from typing import List, Tuple, Optional +from telemetry.backend.service.collectors.gnmi_oc.KPI import KPI + +from .collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector LOGGER = logging.getLogger(__name__) @@ -174,3 +177,40 @@ def get_node_level_int_collector(collector_id: str, kpi_id: str, address: str, i LOGGER.exception(f"Failed to connect INT Collector on node {address}, {interface}:{port}") return collector if connected else None + + +def get_mgon_subscription_parameters(duration, interval) -> Optional[List[Tuple]]: + return [( + "x123", + { + "kpi" : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, # sub_parameters['kpi'], + "endpoint" : '4', # sub_parameters['endpoint'], + "resource" : 'wavelength-router', #sub_parameters['resource'], + }, + duration, + interval, + ),] + + +def get_mgon_collector( + address: str, port: int, username: Optional[str], password: Optional[str], insecure: Optional[bool], + skip_verify: Optional[bool] + ) -> Optional[_Collector]: + + _collector = GNMIOpenConfigCollector( + address = address, + port = port, + username = username, + password = password, + insecure = insecure, + skip_verify = skip_verify, + ) + try: + connected = _collector.Connect() + if not connected: + LOGGER.error(f"Failed to connect to MG-ON collector at {address}:{port}") + return None + return _collector + except Exception as ex: + 
LOGGER.exception(f"Exception while connecting to MG-ON collector at {address}:{port}") + return None diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index d4e99f300..2a7943e57 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -17,7 +17,7 @@ import time import logging import threading -from .HelperMethods import get_collector_by_kpi_id, get_subscription_parameters, get_node_level_int_collector +from .HelperMethods import get_collector_by_kpi_id, get_subscription_parameters, get_node_level_int_collector, get_mgon_subscription_parameters, get_mgon_collector from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc from confluent_kafka import Consumer as KafkaConsumer @@ -144,6 +144,15 @@ class TelemetryBackendService(GenericGrpcService): ) return # Rest of the collectors + elif context_id == "43813baf-195e-5da6-af20-b3d0922e71a7": + self.device_collector = get_mgon_collector( + address = "172.17.254.24", + port = 50061, + username = "admin", + password = "admin", + insecure = True, + skip_verify = True + ) else: self.device_collector = get_collector_by_kpi_id( kpi_id, self.kpi_manager_client, self.context_client, self.driver_instance_cache) @@ -153,9 +162,11 @@ class TelemetryBackendService(GenericGrpcService): raise Exception(f"KPI ID: {kpi_id} - Collector not found.") # CONFIRM: The method (get_subscription_parameters) is working correctly. 
testcase in telemetry backend tests - resource_to_subscribe = get_subscription_parameters( - kpi_id, self.kpi_manager_client, self.context_client, duration, interval - ) + # resource_to_subscribe = get_subscription_parameters( + # kpi_id, self.kpi_manager_client, self.context_client, duration, interval + # ) + resource_to_subscribe = get_mgon_subscription_parameters(duration, interval) # TODO: Remove after confirming get_subscription_parameters is working correctly + if not resource_to_subscribe: LOGGER.warning(f"KPI ID: {kpi_id} - Resource to subscribe not found. Skipping...") raise Exception(f"KPI ID: {kpi_id} - Resource to subscribe not found.") @@ -167,14 +178,32 @@ class TelemetryBackendService(GenericGrpcService): raise status else: LOGGER.info(f"Subscription successful for KPI ID: {kpi_id} - Status: {status}") - + + sample_value = None for samples in self.device_collector.GetState(duration=duration, blocking=True): LOGGER.info(f"KPI ID: {kpi_id} - Samples: {samples}") - self.GenerateKpiValue(collector_id, kpi_id, samples) - - # TODO: Stop_event should be managed in this method because there will be no more specific collector - if stop_event.is_set(): - self.device_collector.Disconnect() + if isinstance(samples, dict): + inn_dict = samples.get('update', {}) + LOGGER.info(f"KPI ID: {kpi_id} - Inner Dictionary: {inn_dict}") + list_update = inn_dict.get('update', []) + LOGGER.info(f"KPI ID: {kpi_id} - List Update: {list_update}") + if len(list_update) > 0: + sample_value = list_update[0].get('val') + self.GenerateKpiValue(collector_id, kpi_id, sample_value) + ''' +{ + 'update': { + 'timestamp': 1772103489806507669, + 'update': + [ + {'path': 'openconfig-wavelength-router:wavelength-router/flex-scale-mg-on:optical-bands/optical-band[index=2]/state/optical-power-total-input/instant', 'val': -2.51} + ] + } + } + ''' + # TODO: Stop_event should be managed in this method because there will be no more specific collector + if stop_event.is_set(): + 
self.device_collector.Disconnect() def GenerateKpiValue(self, collector_id: str, kpi_id: str, measured_kpi_value: Any): """ @@ -186,11 +215,12 @@ class TelemetryBackendService(GenericGrpcService): "kpi_id": kpi_id, "kpi_value": measured_kpi_value } + LOGGER.info(f"Producing KPI Value for Collector ID: {collector_id} - KPI ID: {kpi_id} - Value: {kpi_value}") producer.produce( - KafkaTopic.VALUE.value, - key=collector_id, - value=json.dumps(kpi_value), - callback=self.delivery_callback + "topic_value", #KafkaTopic.VALUE.value, + key = collector_id, + value = json.dumps(kpi_value), + callback = self.delivery_callback ) producer.flush() diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py index 63556d4e9..1b7067b03 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py @@ -20,7 +20,7 @@ from typing import Dict, Optional, Tuple, List, Union, Any, Iterator from pygnmi.client import gNMIclient from telemetry.backend.service.collector_api._Collector import _Collector from .PathMapper import PathMapper -from .SubscriptionNew import LOGGER, Subscription +from .SubscriptionNew import Subscription logging.basicConfig( level=get_log_level(), @@ -133,7 +133,7 @@ class GNMIOpenConfigCollector(_Collector): kpi = sub_endpoint['kpi' ], resource = sub_endpoint['resource'], ) - LOGGER.debug("Built %d candidate path(s) for endpoint '%s'", len(paths), sub_endpoint['endpoint']) + self.logger.debug("Built %d candidate path(s) for endpoint '%s'", len(paths), paths) self._subscriptions[sub_id] = Subscription( sub_id = sub_id, gnmi_client = self.client, # type: ignore diff --git a/src/tests/automation/descriptors/automation.json b/src/tests/automation/descriptors/automation.json index a28156a78..5e6287ec3 100644 --- a/src/tests/automation/descriptors/automation.json 
+++ b/src/tests/automation/descriptors/automation.json @@ -10,7 +10,7 @@ "analyzer":{ "operation_mode": "ANALYZEROPERATIONMODE_STREAMING", "parameters": { - "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"avg\": [-5, -15]}]}" + "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"avg\": [-5, -25]}]}" }, "input_kpi_ids": [ {"kpi_id": { "uuid": "6e22f180-ba28-4641-b190-2287bf447777"}} diff --git a/src/tests/automation/run_test_automation.sh b/src/tests/automation/run_test_automation.sh old mode 100644 new mode 100755 diff --git a/src/tests/automation/test_functional_automation.py b/src/tests/automation/test_functional_automation.py index a29307765..c74d05adc 100644 --- a/src/tests/automation/test_functional_automation.py +++ b/src/tests/automation/test_functional_automation.py @@ -82,6 +82,7 @@ def test_service_zsm_create( # Add important information in the request loaded_request = _zsm_create_request(loaded_request, kpi_manager_client) # loaded_request = _static_zsm_create_request() + # Invoke Automation automation_client.ZSMCreate(loaded_request) diff --git a/src/tests/ofc26_flexscale/Fixtures.py b/src/tests/ofc26_flexscale/Fixtures.py index c5b66b858..b958a39cd 100644 --- a/src/tests/ofc26_flexscale/Fixtures.py +++ b/src/tests/ofc26_flexscale/Fixtures.py @@ -14,12 +14,16 @@ import pytest import logging - +import os from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from service.client.ServiceClient import ServiceClient from kpi_manager.client.KpiManagerClient import KpiManagerClient +from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient +# Import ENV variables +_ip_kpi_address = os.getenv('IP_KPI', None) +_ip_tele_address = os.getenv('IP_TELE', None) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) @@ -54,10 +58,19 @@ def service_client(): @pytest.fixture(scope='session') def kpi_manager_client(): - 
_client = KpiManagerClient(host="10.152.183.121") + _client = KpiManagerClient(host=_ip_kpi_address) _client.connect() LOGGER.info('Yielding Connected KpiManagerClient...') yield _client LOGGER.info('Closed KpiManagerClient...') _client.close() + +@pytest.fixture(scope='session') +def telemetry_frontend_client(): + _client = TelemetryFrontendClient(host=_ip_tele_address) + _client.connect() + LOGGER.info('Yielding Connected TelemetryFrontendClient...') + yield _client + LOGGER.info('Closed TelemetryFrontendClient...') + _client.close() diff --git a/src/tests/ofc26_flexscale/run_ofc26_test.sh b/src/tests/ofc26_flexscale/run_ofc26_test.sh index 026e56876..8fbdd0457 100755 --- a/src/tests/ofc26_flexscale/run_ofc26_test.sh +++ b/src/tests/ofc26_flexscale/run_ofc26_test.sh @@ -20,6 +20,14 @@ cd src/ CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +IP_KPI=$(kubectl get all --all-namespaces | grep service/kpi-managerservice | awk '{print $4}') +export IP_KPI +echo "KPI Manager Service IP: ${IP_KPI}" + +IP_TELE=$(kubectl get all --all-namespaces | grep service/telemetryservice | awk '{print $4}') +export IP_TELE +echo "Telemetry Frontend Service IP: ${IP_TELE}" + python -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ - tests/ofc26_flexscale/test_ofc26_mgon_integration.py + tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py b/src/tests/ofc26_flexscale/test_ofc26_messages.py index c142c0646..20217f2b7 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_messages.py +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -4,6 +4,7 @@ import uuid from common.proto import kpi_manager_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType from src.telemetry.backend.service.collectors.gnmi_oc.KPI 
import KPI +from common.proto import telemetry_frontend_pb2 # ---> KPI Manager messages creation for testing @@ -34,7 +35,7 @@ devices = { 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, #'resource': 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=4]/state/optical-power-total-input/instant', 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model - 'endpoint': '10', + 'endpoint': '4', }, } @@ -62,5 +63,11 @@ def create_basic_sub_request_parameters() -> dict: return {} -# oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=1]/state/optical-power-total-input/instant -# oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=1]/state/optical-power-total-input/instant +def create_collector_request(): + _create_collector_request = telemetry_frontend_pb2.Collector() + _create_collector_request.collector_id.collector_id.uuid = "efef4d95-1cf1-43c4-9742-95c283dddddd" + _create_collector_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_collector_request.duration_s = 300 + _create_collector_request.interval_s = 10 + _create_collector_request.int_collector.context_id = "43813baf-195e-5da6-af20-b3d0922e71a7" + return _create_collector_request diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py index 1f9e6a982..a22115701 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py @@ -42,14 +42,15 @@ def test_Complete_MGON_Integration(kpi_manager_client): kpi_descriptor_obj = create_kpi_descriptor_request() _search_kpi_id = kpi_manager_pb2.KpiId() _search_kpi_id = kpi_descriptor_obj.kpi_id - response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) - if isinstance(response, kpi_manager_pb2.KpiDescriptor): - LOGGER.info("KPI Descriptor already exists with ID: %s. 
Attempting to delete it.", _search_kpi_id.kpi_id.uuid) - else: - LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it.", _search_kpi_id.kpi_id.uuid) - response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) - LOGGER.info("Response gRPC message object: {:}".format(response)) - assert isinstance(response, KpiId) + # response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) + + # if isinstance(response, kpi_manager_pb2.KpiDescriptor): + # LOGGER.info("KPI Descriptor already exists with ID: %s. Skipping creation.", _search_kpi_id.kpi_id.uuid) + # else: + # LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it.", _search_kpi_id.kpi_id.uuid) + response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) # 2. Telemetry Collector Creation diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py new file mode 100644 index 000000000..f6ea99945 --- /dev/null +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -0,0 +1,127 @@ + + +import logging + +from common.proto.kpi_manager_pb2 import KpiId +from common.proto.telemetry_frontend_pb2 import CollectorId +import time + +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +from tests.ofc26_flexscale.test_ofc26_messages import create_kpi_descriptor_request, create_collector_request +from src.tests.ofc26_flexscale.test_ofc26_messages import create_basic_sub_request_parameters + +WITH_TFS = True #True/False +if WITH_TFS: + from .Fixtures import kpi_manager_client, telemetry_frontend_client +else: + from .mock_tfs_services import kpi_manager_client + +LOGGER = logging.getLogger(__name__) + + +def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): + _create_kpi_request = 
kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER + _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS + _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" + # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + # _create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) + # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return _create_kpi_request + +# def create_collector_filter(): +# _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() +# kpi_id_obj = KpiId() +# # kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) +# kpi_id_obj.kpi_id.uuid = "8c5ca114-cdc7-4081-b128-b667fd159832" +# _create_collector_filter.kpi_id.append(kpi_id_obj) +# return _create_collector_filter + + +def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client): + + # 1. KPI Descriptor Creation + LOGGER.info(" >>> test_Complete_MGON_Integration: START <<< ") + kpi_descriptor_obj = create_kpi_descriptor_request() + _search_kpi_id = kpi_manager_pb2.KpiId() + _search_kpi_id = kpi_descriptor_obj.kpi_id + + try: + response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) + if isinstance(response, kpi_manager_pb2.KpiDescriptor): + LOGGER.info("KPI Descriptor already exists with ID: %s. Skipping creation.", _search_kpi_id.kpi_id.uuid) + except Exception as e: + LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it.", _search_kpi_id.kpi_id.uuid) + response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) + + # 2. 
Telemetry Collector Creation + + _collector_request = create_collector_request() + _search_collector_id = CollectorId() + _search_collector_id = _collector_request.collector_id + try: + response_col = telemetry_frontend_client.StopCollector(_search_collector_id) + LOGGER.info("Response gRPC message object: {:}".format(response_col)) + if response is not None: + response = telemetry_frontend_client.StartCollector(_collector_request) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, CollectorId) + except Exception as e: + LOGGER.info("No existing Collector found with ID: %s. Proceeding to create it.", _search_collector_id.collector_id.uuid) + response = telemetry_frontend_client.StartCollector(_collector_request) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, CollectorId) + + # sub_parameters = create_basic_sub_request_parameters() + # LOGGER.info("Subscription parameters: %s", sub_parameters) + + + # LOGGER.info("----- Testing State Subscription -----") + + # sub_data = [( + # "x123", + # { + # "kpi" : sub_parameters['kpi'], + # "endpoint" : sub_parameters['endpoint'], + # "resource" : sub_parameters['resource'], + # }, + # sub_parameters['duration'], + # sub_parameters['sample_interval'], + # ),] + + + + # LOGGER.info("Subscription started: Status: %s, Data: %s", response, sub_data) + + + LOGGER.info("Sleeping...") + time.sleep(600) + LOGGER.info("Done sleeping.") + LOGGER.info(" >>> test_Complete_MGON_Integration: END <<< ") + + +# def test_get_state_updates(collector, subscription_data): +# """Test getting state updates.""" +# LOGGER.info("----- Testing State Updates -----") +# collector.SubscribeState(subscription_data) + +# LOGGER.info("Requesting state updates for 300 seconds ...") +# updates_received = [] +# for samples in collector.GetState(duration=300, blocking=True): +# LOGGER.info("Received state update: %s", samples) +# updates_received.append(samples) + 
+# assert len(updates_received) > 0 + + +if __name__ == "__main__": + test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client) + + -- GitLab From eb80a6daab8398d21da63c348fe33e832fb68727 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Thu, 26 Feb 2026 14:43:11 +0100 Subject: [PATCH 21/41] last bugs --- .../policy/CommonPolicyServiceImpl.java | 4 +- .../test_ofc26_mgon_integration.py | 117 ------------------ 2 files changed, 3 insertions(+), 118 deletions(-) delete mode 100644 src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py diff --git a/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java b/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java index 074464cf4..e3841139e 100644 --- a/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java +++ b/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java @@ -393,6 +393,8 @@ public class CommonPolicyServiceImpl { final var policyRuleTypeService = new PolicyRuleTypeService(policyRuleService); final var policyRule = new PolicyRule(policyRuleTypeService); contextService.setPolicyRule(policyRule).subscribe().with(x -> {}); + + LOGGER.infof("Policy Rule state is now [%s]", policyRuleState.toString()); } public void setPolicyRuleDeviceToContext( @@ -404,6 +406,6 @@ public class CommonPolicyServiceImpl { final var policyRuleTypeService = new PolicyRuleTypeDevice(policyRuleDevice); final var policyRule = new PolicyRule(policyRuleTypeService); - contextService.setPolicyRule(policyRule).subscribe().with(x -> {}); + final var policyRuleId = contextService.setPolicyRule(policyRule).subscribe().with(x -> {}); } } diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py deleted file mode 100644 index a22115701..000000000 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration.py +++ /dev/null @@ -1,117 +0,0 @@ - - -import 
logging - -from common.proto.kpi_manager_pb2 import KpiId -import time - -from common.proto import kpi_manager_pb2 -from common.proto.kpi_sample_types_pb2 import KpiSampleType - -from tests.ofc26_flexscale.test_ofc26_messages import create_kpi_descriptor_request -from telemetry.backend.service.collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector -from src.tests.ofc26_flexscale.test_ofc26_messages import create_basic_sub_request_parameters - -WITH_TFS = True #True/False -if WITH_TFS: - from .Fixtures import kpi_manager_client -else: - from .mock_tfs_services import kpi_manager_client - -LOGGER = logging.getLogger(__name__) - - -def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): - _create_kpi_request = kpi_manager_pb2.KpiDescriptor() - _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" - _create_kpi_request.kpi_description = descriptor_name - _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER - _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS - _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" - # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' - # _create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) - # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' - # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' - return _create_kpi_request - - -def test_Complete_MGON_Integration(kpi_manager_client): - - # 1. KPI Descriptor Creation - LOGGER.info(" >>> test_Complete_MGON_Integration: START <<< ") - kpi_descriptor_obj = create_kpi_descriptor_request() - _search_kpi_id = kpi_manager_pb2.KpiId() - _search_kpi_id = kpi_descriptor_obj.kpi_id - # response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) - - # if isinstance(response, kpi_manager_pb2.KpiDescriptor): - # LOGGER.info("KPI Descriptor already exists with ID: %s. 
Skipping creation.", _search_kpi_id.kpi_id.uuid) - # else: - # LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it.", _search_kpi_id.kpi_id.uuid) - response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) - LOGGER.info("Response gRPC message object: {:}".format(response)) - assert isinstance(response, KpiId) - - - # 2. Telemetry Collector Creation - sub_parameters = create_basic_sub_request_parameters() - LOGGER.info("Subscription parameters: %s", sub_parameters) - - collector = GNMIOpenConfigCollector( - address = sub_parameters.get('host', ''), - port = sub_parameters.get('port', -1), - username = sub_parameters.get('username', None), - password = sub_parameters.get('password', None), - insecure = sub_parameters.get('insecure', None), - skip_verify = sub_parameters.get('skip_verify', True), - ) - if not collector.Connect(): - LOGGER.error("Failed to connect to the collector") - return - - LOGGER.info("----- Testing State Subscription -----") - - sub_data = [( - "x123", - { - "kpi" : sub_parameters['kpi'], - "endpoint" : sub_parameters['endpoint'], - "resource" : sub_parameters['resource'], - }, - sub_parameters['duration'], - sub_parameters['sample_interval'], - ),] - - response = collector.SubscribeState(sub_data) - if response is None: - LOGGER.error("Subscription failed.") - return - - LOGGER.info("Subscription started: Status: %s, Data: %s", response, sub_data) - - test_get_state_updates(collector, sub_data) - - LOGGER.info("Sleeping...") - time.sleep(600) - LOGGER.info("Done sleeping.") - LOGGER.info(" >>> test_Complete_MGON_Integration: END <<< ") - - -def test_get_state_updates(collector, subscription_data): - """Test getting state updates.""" - LOGGER.info("----- Testing State Updates -----") - collector.SubscribeState(subscription_data) - - LOGGER.info("Requesting state updates for 300 seconds ...") - updates_received = [] - for samples in collector.GetState(duration=300, blocking=True): - LOGGER.info("Received 
state update: %s", samples) - updates_received.append(samples) - - assert len(updates_received) > 0 - - -if __name__ == "__main__": - test_Complete_MGON_Integration(kpi_manager_client) - - -- GitLab From 7b0bc3cb4913a75a3891f954d855304fee42e81d Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 2 Mar 2026 10:49:17 +0100 Subject: [PATCH 22/41] fix for dynamic telemetry activation --- .../backend/service/HelperMethods.py | 18 +++++------ .../service/TelemetryBackendService.py | 30 ++++++++++++------- .../ofc26_flexscale/telemetry_service.py | 0 .../ofc26_flexscale/test_ofc26_messages.py | 17 ++++++----- .../test_ofc26_mgon_integration_V2.py | 2 +- 5 files changed, 39 insertions(+), 28 deletions(-) delete mode 100644 src/tests/ofc26_flexscale/telemetry_service.py diff --git a/src/telemetry/backend/service/HelperMethods.py b/src/telemetry/backend/service/HelperMethods.py index d0a68c8e8..313dbba86 100644 --- a/src/telemetry/backend/service/HelperMethods.py +++ b/src/telemetry/backend/service/HelperMethods.py @@ -29,7 +29,7 @@ from .collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector LOGGER = logging.getLogger(__name__) def get_subscription_parameters( - kpi_id : str, kpi_manager_client, context_client, duration, interval + kpi_id : str, kpi_manager_client, context_client, resource, duration, interval ) -> Optional[List[Tuple]]: """ Method to get subscription parameters based on KPI ID. @@ -47,9 +47,9 @@ def get_subscription_parameters( - A KPI Descriptor must be added in KPI DB with correct device_id. - The device must be available in the context. """ - kpi_id_obj = KpiId() + kpi_id_obj = KpiId() kpi_id_obj.kpi_id.uuid = kpi_id # pyright: ignore[reportAttributeAccessIssue] - kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) + kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) if not kpi_descriptor: LOGGER.warning(f"KPI ID: {kpi_id} - Descriptor not found. 
Skipping...") return None @@ -90,7 +90,7 @@ def get_subscription_parameters( { "kpi" : kpi_sample_type, # As request is based on the single KPI so it should have only one endpoint "endpoint" : endpoint_data[endpoint.endpoint_uuid.uuid][0], # Endpoint name - "resource" : 'interface', # Example resource type + "resource" : resource, # Example resource type is 'interface' or 'wavelength-router' for MG-ON, this should be defined in the KPI Descriptor or as part of the request }, float(duration), float(interval), @@ -179,13 +179,13 @@ def get_node_level_int_collector(collector_id: str, kpi_id: str, address: str, i return collector if connected else None -def get_mgon_subscription_parameters(duration, interval) -> Optional[List[Tuple]]: +def get_mgon_subscription_parameters(resource: str, endpoint: str, kpi: str, duration: int, interval: int) -> Optional[List[Tuple]]: return [( - "x123", + str(uuid.uuid4()), # "x123", { - "kpi" : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, # sub_parameters['kpi'], - "endpoint" : '4', # sub_parameters['endpoint'], - "resource" : 'wavelength-router', #sub_parameters['resource'], + "kpi" : kpi, # sub_parameters['kpi'], + "endpoint" : endpoint, # sub_parameters['endpoint'], + "resource" : resource, #sub_parameters['resource'], }, duration, interval, diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index 2a7943e57..7214a2bcc 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -97,6 +97,10 @@ class TelemetryBackendService(GenericGrpcService): threading.Thread(target=self.GenericCollectorHandler, args=( collector_id, + collector, # TODO: later all other collector[''] should be removed. + # For now, to avoid multiple changes in the code, + # I am passing the whole collector dict and accessing the required parameters in the GenericCollectorHandler method. 
+ # This will be changed after confirming the current implementation is working fine. collector['kpi_id'], duration, collector['interval'], @@ -126,7 +130,9 @@ class TelemetryBackendService(GenericGrpcService): LOGGER.warning( f"Unable to consume message from topic: {KafkaTopic.TELEMETRY_REQUEST.value}. ERROR: {e}") - def GenericCollectorHandler(self, collector_id, kpi_id, duration, interval, interface, port, service_id, context_id, stop_event): + def GenericCollectorHandler(self, + collector_id, collector, kpi_id, duration, interval, interface, port, + service_id, context_id, stop_event): """ Method to handle collector request. """ @@ -146,12 +152,12 @@ class TelemetryBackendService(GenericGrpcService): # Rest of the collectors elif context_id == "43813baf-195e-5da6-af20-b3d0922e71a7": self.device_collector = get_mgon_collector( - address = "172.17.254.24", - port = 50061, - username = "admin", - password = "admin", - insecure = True, - skip_verify = True + address = collector['host'], # "172.17.254.24", + port = collector['port'], # 50061, + username = collector['username'], # "admin", + password = collector['password'], # "admin", + insecure = collector.get('insecure', True), + skip_verify = collector.get('skip_verify', True) ) else: self.device_collector = get_collector_by_kpi_id( @@ -163,10 +169,14 @@ class TelemetryBackendService(GenericGrpcService): # CONFIRM: The method (get_subscription_parameters) is working correctly. 
testcase in telemetry backend tests # resource_to_subscribe = get_subscription_parameters( - # kpi_id, self.kpi_manager_client, self.context_client, duration, interval + # kpi_id, self.kpi_manager_client, self.context_client, resource, duration, interval # ) - resource_to_subscribe = get_mgon_subscription_parameters(duration, interval) # TODO: Remove after confirming get_subscription_parameters is working correctly - + # TODO: Remove after confirming get_subscription_parameters generic is working correctly + resource_to_subscribe = get_mgon_subscription_parameters( + collector['resource'], collector['endpoint'], collector['kpi'], + collector['duration'], collector['sample_interval'] + ) + if not resource_to_subscribe: LOGGER.warning(f"KPI ID: {kpi_id} - Resource to subscribe not found. Skipping...") raise Exception(f"KPI ID: {kpi_id} - Resource to subscribe not found.") diff --git a/src/tests/ofc26_flexscale/telemetry_service.py b/src/tests/ofc26_flexscale/telemetry_service.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py b/src/tests/ofc26_flexscale/test_ofc26_messages.py index 20217f2b7..11ad15946 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_messages.py +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -27,15 +27,16 @@ def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): devices = { 'mgon': { - 'host' : '172.17.254.24', - 'port' : '50061', - 'username': 'admin', - 'password': 'admin', - 'insecure': True, - 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, + 'host' : '172.17.254.24', + 'port' : '50061', + 'username' : 'admin', + 'password' : 'admin', + 'insecure' : True, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, #'resource': 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=4]/state/optical-power-total-input/instant', - 'resource': 'wavelength-router', #TODO: verify resource name form mg-on model - 'endpoint': '4', + 'resource' 
: 'wavelength-router', #TODO: verify resource name form mg-on model + 'endpoint' : '4', + 'skip_verify': True }, } diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py index f6ea99945..9ec62501f 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -74,7 +74,7 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client LOGGER.info("Response gRPC message object: {:}".format(response)) assert isinstance(response, CollectorId) except Exception as e: - LOGGER.info("No existing Collector found with ID: %s. Proceeding to create it.", _search_collector_id.collector_id.uuid) + LOGGER.info("Error finding the collector with ID: %s. Proceeding to create it.", _search_collector_id.collector_id.uuid) response = telemetry_frontend_client.StartCollector(_collector_request) LOGGER.info("Response gRPC message object: {:}".format(response)) assert isinstance(response, CollectorId) -- GitLab From 46e5f9dead6642418b3e4cf9c880b8488fc1bbc3 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 2 Mar 2026 13:16:31 +0100 Subject: [PATCH 23/41] telemetry fix --- .../service/TelemetryBackendService.py | 4 +- .../ofc26_flexscale/test_ofc26_messages.py | 15 +- .../test_ofc26_mgon_integration_V2.py | 74 +- .../topology/CNIT/1.context.json | 19 + .../CNIT/2.0nodes-links_no_slots.json | 1378 +++++++++++++++++ .../topology/CNIT/3.0-ob-s.json | 23 + .../topology/CNIT/3.1-sc-s1-alien.json | 25 + .../topology/CNIT/3.2-sc-s2-alien.json | 25 + .../topology/CNIT/4.0.ob_l.json | 25 + .../topology/CNIT/4.1-sc-l1-alien.json | 25 + .../topology/CNIT/4.2-sc-l2-alien.json | 25 + .../topology/CNIT/5.0ob_c1.json | 23 + .../topology/CNIT/5.1ob_c2.json | 25 + .../topology/CNIT/5.2-sc-c-alien.json | 25 + .../topology/CNIT/5.3-oc-c-service.json | 23 + .../topology/HHI/1.context.json | 19 + 
.../HHI/2.0nodes-links_no_slots_2TPs.json | 826 ++++++++++ .../HHI/2.0nodes-links_no_slots_6TPs.json | 1378 +++++++++++++++++ .../topology/HHI/3.0-ob-s.json | 23 + .../topology/HHI/3.1-sc-s1-alien.json | 25 + .../topology/HHI/3.2-sc-s2-alien.json | 25 + .../topology/HHI/4.0.ob_l.json | 25 + .../topology/HHI/4.1-sc-l1-alien.json | 25 + .../topology/HHI/4.2-sc-l2-alien.json | 25 + .../topology/HHI/5.0ob_c1.json | 23 + .../topology/HHI/5.1ob_c2 - Copia.json | 25 + .../topology/HHI/5.1ob_c2.json | 25 + .../topology/HHI/5.2-sc-c-alien.json | 25 + .../topology/HHI/5.3-oc-c-service.json | 23 + 29 files changed, 4194 insertions(+), 32 deletions(-) create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/1.context.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/1.context.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json create 
mode 100644 src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json create mode 100644 src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index 7214a2bcc..a7cd05e45 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -82,7 +82,7 @@ class TelemetryBackendService(GenericGrpcService): LOGGER.error(f"Consumer error: {receive_msg.error()}") break try: - collector = json.loads(receive_msg.value().decode('utf-8')) + collector = json.loads(receive_msg.value().decode('utf-8')) collector_id = receive_msg.key().decode('utf-8') LOGGER.debug(f"Received Collector: {collector_id} - {collector}") @@ -136,7 +136,7 @@ class TelemetryBackendService(GenericGrpcService): """ Method to handle collector request. 
""" - + LOGGER.info(f"Starting Collector Handler for Collector ID: {collector_id} - KPI ID: {kpi_id}") # INT collector invocation if interface: self.device_collector = get_node_level_int_collector( diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py b/src/tests/ofc26_flexscale/test_ofc26_messages.py index 11ad15946..2dbef239f 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_messages.py +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -1,6 +1,6 @@ -import uuid +import json from common.proto import kpi_manager_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI @@ -17,7 +17,7 @@ def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' - # _create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "<>" # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' return _create_kpi_request @@ -35,7 +35,7 @@ devices = { 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, #'resource': 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=4]/state/optical-power-total-input/instant', 'resource' : 'wavelength-router', #TODO: verify resource name form mg-on model - 'endpoint' : '4', + 'endpoint' : '1', 'skip_verify': True }, } @@ -44,9 +44,6 @@ def create_basic_sub_request_parameters() -> dict: device = devices['mgon'] if device: - kpi = device['kpi'] - resource = device['resource'] - endpoint = device['endpoint'] return { 'host' : device['host'], 'port' : device['port'], @@ -57,9 +54,9 @@ def create_basic_sub_request_parameters() -> dict: 'mode' : 'sample', # 
Subscription internal mode posibly: on_change, poll, sample 'sample_interval' : 10, # This should be in seconds units 'duration' : 300.0, # Duration in seconds for how long to receive samples - 'kpi' : kpi, - 'resource' : resource, - 'endpoint' : endpoint, + 'kpi' : device['kpi'], + 'resource' : device['resource'], + 'endpoint' : device['endpoint'], } return {} diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py index 9ec62501f..27c6a2cb4 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -1,10 +1,11 @@ import logging - +import time from common.proto.kpi_manager_pb2 import KpiId from common.proto.telemetry_frontend_pb2 import CollectorId import time +import threading from common.proto import kpi_manager_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType @@ -12,6 +13,9 @@ from common.proto.kpi_sample_types_pb2 import KpiSampleType from tests.ofc26_flexscale.test_ofc26_messages import create_kpi_descriptor_request, create_collector_request from src.tests.ofc26_flexscale.test_ofc26_messages import create_basic_sub_request_parameters +from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService + + WITH_TFS = True #True/False if WITH_TFS: from .Fixtures import kpi_manager_client, telemetry_frontend_client @@ -48,8 +52,8 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client # 1. 
KPI Descriptor Creation LOGGER.info(" >>> test_Complete_MGON_Integration: START <<< ") kpi_descriptor_obj = create_kpi_descriptor_request() - _search_kpi_id = kpi_manager_pb2.KpiId() - _search_kpi_id = kpi_descriptor_obj.kpi_id + _search_kpi_id = kpi_manager_pb2.KpiId() + _search_kpi_id = kpi_descriptor_obj.kpi_id try: response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) @@ -63,26 +67,52 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client # 2. Telemetry Collector Creation + # _collector_request = create_collector_request() + # _search_collector_id = CollectorId() + # _search_collector_id = _collector_request.collector_id + # try: + # response_col = telemetry_frontend_client.StopCollector(_search_collector_id) + # LOGGER.info("Response gRPC message object: {:}".format(response_col)) + # if response is not None: + # response = telemetry_frontend_client.StartCollector(_collector_request) + # LOGGER.info("Response gRPC message object: {:}".format(response)) + # assert isinstance(response, CollectorId) + # except Exception as e: + # LOGGER.info("Error finding the collector with ID: %s. 
Proceeding to create it.", _search_collector_id.collector_id.uuid) + # response = telemetry_frontend_client.StartCollector(_collector_request) + # LOGGER.info("Response gRPC message object: {:}".format(response)) + # assert isinstance(response, CollectorId) + + # step 2: Telemetry Collector backup option _collector_request = create_collector_request() - _search_collector_id = CollectorId() - _search_collector_id = _collector_request.collector_id - try: - response_col = telemetry_frontend_client.StopCollector(_search_collector_id) - LOGGER.info("Response gRPC message object: {:}".format(response_col)) - if response is not None: - response = telemetry_frontend_client.StartCollector(_collector_request) - LOGGER.info("Response gRPC message object: {:}".format(response)) - assert isinstance(response, CollectorId) - except Exception as e: - LOGGER.info("Error finding the collector with ID: %s. Proceeding to create it.", _search_collector_id.collector_id.uuid) - response = telemetry_frontend_client.StartCollector(_collector_request) - LOGGER.info("Response gRPC message object: {:}".format(response)) - assert isinstance(response, CollectorId) + _collector = create_basic_sub_request_parameters() + _coll_id = "mgon_collector_id" + LOGGER.info("Subscription for collector %s parameters: %s", _coll_id, _collector) - # sub_parameters = create_basic_sub_request_parameters() - # LOGGER.info("Subscription parameters: %s", sub_parameters) - - + _duration = _collector_request.duration_s + _interval = _collector_request.interval_s + + stop_event = threading.Event() + collector_thread = threading.Thread( + target=TelemetryBackendService.GenericCollectorHandler, + args=( + _coll_id, _collector, None, _duration, _interval, + None, None, None, None, stop_event + ) + ) + + def stop_after_duration(completion_time, stop_event): + time.sleep(completion_time) + if not stop_event.is_set(): + LOGGER.warning(f"Execution duration ({completion_time}) completed for Collector: {_coll_id}") + if 
stop_event: + stop_event.set() + + duration_thread = threading.Thread( + target=stop_after_duration, daemon=True, name=f"stop_after_duration_{_coll_id}", + args=(_duration, stop_event) + ) + duration_thread.start() # LOGGER.info("----- Testing State Subscription -----") # sub_data = [( @@ -103,6 +133,8 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client LOGGER.info("Sleeping...") time.sleep(600) + if stop_event: + stop_event.set() LOGGER.info("Done sleeping.") LOGGER.info(" >>> test_Complete_MGON_Integration: END <<< ") diff --git a/src/tests/ofc26_flexscale/topology/CNIT/1.context.json b/src/tests/ofc26_flexscale/topology/CNIT/1.context.json new file mode 100644 index 000000000..36b3c44fd --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/1.context.json @@ -0,0 +1,19 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + }, + "device_ids": [], + "link_ids": [] + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json b/src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json new file mode 100644 index 000000000..d9f643f09 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json @@ -0,0 +1,1378 @@ +{ + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.51" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + 
"resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.51" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.51" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": 
false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + 
"device_uuid": { + "uuid": "T2.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.21" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + 
"config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.22" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.23" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.24" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + 
"resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.1->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + 
"endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.1", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", 
+ "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-3-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-9-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-3-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON1", + "link_id": { + "link_uuid": { + "uuid": 
"MGON3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-1-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + 
"device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-9-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-1-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.1-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.1->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": 
"port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.1", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.3", 
+ "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json b/src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json new file mode 100644 index 000000000..d3fa89a1f --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-S"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "S_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "6000"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json new file mode 100644 index 000000000..f1752d7e6 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + 
"service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-25-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-25-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "1"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json new file mode 100644 index 000000000..4a38e9022 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-26-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-26-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "2900"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "1"}}, + {"custom": {"constraint_type": "bidirectionality", 
"constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json b/src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json new file mode 100644 index 000000000..0f02c6122 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-L"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "L_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "1"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json new file mode 100644 index 000000000..e92ef29da --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-27-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": 
{"uuid": "port-27-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1500"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json new file mode 100644 index 000000000..bada34dbc --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-28-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-28-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1500"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json b/src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json new file mode 100644 index 000000000..ab56e9daa --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + 
"service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json b/src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json new file mode 100644 index 000000000..b9398c106 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "3"}} + + ], + "service_config": 
{"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json new file mode 100644 index 000000000..831a271d2 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-c"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-29-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-29-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json b/src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json new file mode 100644 index 000000000..47b551a94 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-channel-C"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "1"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", 
"constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "800.0"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/1.context.json b/src/tests/ofc26_flexscale/topology/HHI/1.context.json new file mode 100644 index 000000000..36b3c44fd --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/1.context.json @@ -0,0 +1,19 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + }, + "device_ids": [], + "link_ids": [] + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json new file mode 100644 index 000000000..2fa101744 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json @@ -0,0 +1,826 @@ +{ + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + 
"commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44551" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { 
+ "uuid": "MGON2" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44552" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44553" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + 
"custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44554" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.1->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.1", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + 
"device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-3-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-9-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-10-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-10-out", + "dst_port": "port-3-in", + "local_peer_port": "port-10-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + 
"endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-10-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-10-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-2-in", + "local_peer_port": 
"port-1-in", + "remote_peer_port": "port-2-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-2-out", + "dst_port": "port-1-in", + "local_peer_port": "port-2-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.1-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.1->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.1", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + } + ] +} diff --git 
a/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json new file mode 100644 index 000000000..a4dea6ae3 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json @@ -0,0 +1,1378 @@ +{ + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": 
false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + 
"uuid": "T2.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + 
"config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44551" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44552" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + 
"resource_value": "44553" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44554" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.1->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": 
"T1.2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.1", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": 
"port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-3-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + 
"src_port": "port-3-out", + "dst_port": "port-9-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-10-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-10-out", + "dst_port": "port-3-in", + "local_peer_port": "port-10-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-10-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-10-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + 
"s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-2-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-2-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-2-out", + "dst_port": "port-1-in", + "local_peer_port": "port-2-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.1-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.1->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + 
"device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.1", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + 
} + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json b/src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json new file mode 100644 index 000000000..d3fa89a1f --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-S"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": 
"MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "S_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "6000"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json b/src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json new file mode 100644 index 000000000..37969cfb8 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-25-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-25-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "3"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json b/src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json new file mode 100644 index 000000000..e4d620d47 --- /dev/null +++ 
b/src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-26-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-26-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "2900"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "3"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json b/src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json new file mode 100644 index 000000000..c63243f32 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-L"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "L_BAND"}}, + {"custom": 
{"constraint_type": "optical-band-width[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "3"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json b/src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json new file mode 100644 index 000000000..3d24a34e3 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-27-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-27-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1500"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json b/src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json new file mode 100644 index 000000000..8210c786e --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, 
"endpoint_uuid": {"uuid": "port-28-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-28-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1200"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json b/src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json new file mode 100644 index 000000000..ab56e9daa --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json new file mode 100644 index 000000000..d34f10d85 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json @@ -0,0 
+1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "1900"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "1"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json new file mode 100644 index 000000000..31229c1dc --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}}, + 
{"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "1"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json b/src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json new file mode 100644 index 000000000..8411862c0 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-c"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-29-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-29-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1959"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json b/src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json new file mode 100644 index 000000000..169b854c2 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-channel-C"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, 
"endpoint_uuid": {"uuid": "1"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "800.0"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}} + ], + "service_config": {"config_rules": []} + } + ] +} -- GitLab From de9ecf8e968132a35110ef00bc23c26804fc87ba Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 2 Mar 2026 13:34:38 +0100 Subject: [PATCH 24/41] refactor test_Complete_MGON_Integration for improved logging and thread management --- .../test_ofc26_mgon_integration_V2.py | 38 +++++++------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py index 27c6a2cb4..069bb3b00 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -52,7 +52,6 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client # 1. KPI Descriptor Creation LOGGER.info(" >>> test_Complete_MGON_Integration: START <<< ") kpi_descriptor_obj = create_kpi_descriptor_request() - _search_kpi_id = kpi_manager_pb2.KpiId() _search_kpi_id = kpi_descriptor_obj.kpi_id try: @@ -60,7 +59,7 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client if isinstance(response, kpi_manager_pb2.KpiDescriptor): LOGGER.info("KPI Descriptor already exists with ID: %s. Skipping creation.", _search_kpi_id.kpi_id.uuid) except Exception as e: - LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it.", _search_kpi_id.kpi_id.uuid) + LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it. 
Error: %s", _search_kpi_id.kpi_id.uuid, str(e)) response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) LOGGER.info("Response gRPC message object: {:}".format(response)) assert isinstance(response, KpiId) @@ -98,43 +97,34 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client args=( _coll_id, _collector, None, _duration, _interval, None, None, None, None, stop_event - ) + ), + daemon=False ) + collector_thread.start() def stop_after_duration(completion_time, stop_event): time.sleep(completion_time) if not stop_event.is_set(): LOGGER.warning(f"Execution duration ({completion_time}) completed for Collector: {_coll_id}") - if stop_event: - stop_event.set() + stop_event.set() duration_thread = threading.Thread( target=stop_after_duration, daemon=True, name=f"stop_after_duration_{_coll_id}", args=(_duration, stop_event) ) duration_thread.start() - # LOGGER.info("----- Testing State Subscription -----") - - # sub_data = [( - # "x123", - # { - # "kpi" : sub_parameters['kpi'], - # "endpoint" : sub_parameters['endpoint'], - # "resource" : sub_parameters['resource'], - # }, - # sub_parameters['duration'], - # sub_parameters['sample_interval'], - # ),] - + LOGGER.info("Sleeping for %d seconds...", _duration) + time.sleep(_duration) - # LOGGER.info("Subscription started: Status: %s, Data: %s", response, sub_data) + LOGGER.info("Setting stop event for Collector: %s", _coll_id) + stop_event.set() + + # Wait for collector thread to complete + collector_thread.join(timeout=10) + if collector_thread.is_alive(): + LOGGER.warning("Collector thread did not terminate within timeout") - - LOGGER.info("Sleeping...") - time.sleep(600) - if stop_event: - stop_event.set() LOGGER.info("Done sleeping.") LOGGER.info(" >>> test_Complete_MGON_Integration: END <<< ") -- GitLab From 362464e1e54b279dc1c9cbe83b290b67772ceb91 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 2 Mar 2026 13:54:28 +0100 Subject: [PATCH 25/41] refactor 
test_Complete_MGON_Integration to enhance telemetry service initialization and collector handling --- .../test_ofc26_mgon_integration_V2.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py index 069bb3b00..6f115231b 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -10,10 +10,11 @@ import threading from common.proto import kpi_manager_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType +from src.telemetry.backend.service.collector_api import DriverFactory from tests.ofc26_flexscale.test_ofc26_messages import create_kpi_descriptor_request, create_collector_request from src.tests.ofc26_flexscale.test_ofc26_messages import create_basic_sub_request_parameters -from src.telemetry.backend.service.TelemetryBackendService import TelemetryBackendService +from src.telemetry.backend.service.TelemetryBackendService import DriverInstanceCache, TelemetryBackendService WITH_TFS = True #True/False @@ -83,6 +84,14 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client # assert isinstance(response, CollectorId) # step 2: Telemetry Collector backup option + from telemetry.backend.service.collectors import COLLECTORS + from telemetry.backend.service.collector_api.DriverFactory import DriverFactory + from telemetry.backend.service.collector_api.DriverInstanceCache import DriverInstanceCache, preload_drivers + + driver_factory = DriverFactory(COLLECTORS) + driver_instance_cache = DriverInstanceCache(driver_factory) + _service = TelemetryBackendService(driver_instance_cache) + _collector_request = create_collector_request() _collector = create_basic_sub_request_parameters() _coll_id = "mgon_collector_id" @@ -93,10 +102,10 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client 
stop_event = threading.Event() collector_thread = threading.Thread( - target=TelemetryBackendService.GenericCollectorHandler, + target=_service.GenericCollectorHandler, args=( _coll_id, _collector, None, _duration, _interval, - None, None, None, None, stop_event + None, None, None, "43813baf-195e-5da6-af20-b3d0922e71a7", stop_event ), daemon=False ) -- GitLab From 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Fri, 27 Mar 2026 19:52:38 +0100 Subject: [PATCH 26/41] working closed loop and threshold --- src/tests/automation/descriptors/automation.json | 2 +- src/tests/ofc26_flexscale/run_ofc26_test.sh | 14 ++++++++++++++ src/tests/ofc26_flexscale/test_ofc26_messages.py | 4 ++-- .../test_ofc26_mgon_integration_V2.py | 2 +- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/tests/automation/descriptors/automation.json b/src/tests/automation/descriptors/automation.json index 5e6287ec3..bef7c4aa6 100644 --- a/src/tests/automation/descriptors/automation.json +++ b/src/tests/automation/descriptors/automation.json @@ -10,7 +10,7 @@ "analyzer":{ "operation_mode": "ANALYZEROPERATIONMODE_STREAMING", "parameters": { - "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"avg\": [-5, -25]}]}" + "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"last\": [-20, 5]}]}" }, "input_kpi_ids": [ {"kpi_id": { "uuid": "6e22f180-ba28-4641-b190-2287bf447777"}} diff --git a/src/tests/ofc26_flexscale/run_ofc26_test.sh b/src/tests/ofc26_flexscale/run_ofc26_test.sh index 8fbdd0457..9d29118ce 100755 --- a/src/tests/ofc26_flexscale/run_ofc26_test.sh +++ b/src/tests/ofc26_flexscale/run_ofc26_test.sh @@ -20,6 +20,20 @@ cd src/ CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" +#added for kafka exposure +export 
KFK_SERVER_ADDRESS='127.0.0.1:9094' + +kubectl port-forward -n kafka service/kafka-public 9094:9094 > /dev/null 2>&1 & +KAFKA_PF_PID=$! + +# Function to cleanup port-forward on exit +cleanup() { + # echo "Cleaning up Kafka port-forward (PID: ${KAFKA_PF_PID})..." + kill ${KAFKA_PF_PID} 2>/dev/null || true + wait ${KAFKA_PF_PID} 2>/dev/null || true +} + + IP_KPI=$(kubectl get all --all-namespaces | grep service/kpi-managerservice | awk '{print $4}') export IP_KPI echo "KPI Manager Service IP: ${IP_KPI}" diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py b/src/tests/ofc26_flexscale/test_ofc26_messages.py index 2dbef239f..b29bb949e 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_messages.py +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -17,7 +17,7 @@ def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' - _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "<>" + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "2" # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' return _create_kpi_request @@ -35,7 +35,7 @@ devices = { 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, #'resource': 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=4]/state/optical-power-total-input/instant', 'resource' : 'wavelength-router', #TODO: verify resource name form mg-on model - 'endpoint' : '1', + 'endpoint' : '2', 'skip_verify': True }, } diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py index 6f115231b..ec3a57ee0 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py +++ 
b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -104,7 +104,7 @@ def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client collector_thread = threading.Thread( target=_service.GenericCollectorHandler, args=( - _coll_id, _collector, None, _duration, _interval, + _coll_id, _collector, "6e22f180-ba28-4641-b190-2287bf447777", _duration, _interval, None, None, None, "43813baf-195e-5da6-af20-b3d0922e71a7", stop_event ), daemon=False -- GitLab From 27bb40b7dd0340cc4a2d889b5f9c60a61df7fa18 Mon Sep 17 00:00:00 2001 From: sgambelluri Date: Mon, 30 Mar 2026 11:06:51 +0200 Subject: [PATCH 27/41] pre-merge commit for optical closed automation --- .../grpc/kpi_sample_types/KpiSampleTypes.java | 24 +++++++++++++++- src/policy/target/kubernetes/kubernetes.yml | 20 ++++++------- src/tests/ofc26_flexscale/my_deploy_ofc26.sh | 2 +- .../grpc/kpi_sample_types/KpiSampleTypes.java | 24 +++++++++++++++- src/ztp/target/kubernetes/kubernetes.yml | 28 +++++++++---------- 5 files changed, 71 insertions(+), 27 deletions(-) diff --git a/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java b/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java index 0c98ddbb4..3f4a7a2a6 100644 --- a/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java +++ b/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java @@ -71,6 +71,14 @@ public final class KpiSampleTypes { * KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; */ KPISAMPLETYPE_OPTICAL_SECURITY_STATUS(501), + /** + * KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT(502), + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER(503), /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -280,6 +288,16 @@ public final class KpiSampleTypes { */ public static final int KPISAMPLETYPE_OPTICAL_SECURITY_STATUS_VALUE = 501; + /** + * 
KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + public static final int KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT_VALUE = 502; + + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + public static final int KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER_VALUE = 503; + /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -503,6 +521,10 @@ public final class KpiSampleTypes { return KPISAMPLETYPE_ML_CONFIDENCE; case 501: return KPISAMPLETYPE_OPTICAL_SECURITY_STATUS; + case 502: + return KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT; + case 503: + return KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER; case 601: return KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS; case 602: @@ -628,7 +650,7 @@ public final class KpiSampleTypes { private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { - java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\346\r\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022)\n$KPISA" + "MPLETYPE_L3_UNIQUE_ATTACK_CONNS\020\331\004\022*\n%KP" + "ISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS\020\332\004\022&" + "\n!KPISAMPLETYPE_L3_UNIQUE_ATTACKERS\020\333\004\0220" + "\n+KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CL" + "IENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3_SECURITY_ST" + "ATUS_CRYPTO\020\335\004\022%\n KPISAMPLETYPE_SERVICE_" + "LATENCY_MS\020\275\005\0221\n,KPISAMPLETYPE_PACKETS_T" + 
"RANSMITTED_AGG_OUTPUT\020\315\010\022.\n)KPISAMPLETYP" + "E_PACKETS_RECEIVED_AGG_OUTPUT\020\316\010\022-\n(KPIS" + "AMPLETYPE_PACKETS_DROPPED_AGG_OUTPUT\020\317\010\022" + "/\n*KPISAMPLETYPE_BYTES_TRANSMITTED_AGG_O" + "UTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BYTES_RECEIVED" + "_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLETYPE_BYTES_DR" + "OPPED_AGG_OUTPUT\020\263\t\0220\n+KPISAMPLETYPE_SER" + "VICE_LATENCY_MS_AGG_OUTPUT\020\245\r\022\036\n\031KPISAMP" + "LETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPISAMPLETYPE_I" + "NT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYPE_INT_TS_EGR" + "\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP_LAT\020\324\017\022\"\n\035K" + "PISAMPLETYPE_INT_PORT_ID_ING\020\325\017\022\"\n\035KPISA" + "MPLETYPE_INT_PORT_ID_EGR\020\326\017\022\"\n\035KPISAMPLE" + "TYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032KPISAMPLETYPE" + "_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW01\020\265\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "4\020\270\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW05\020\271" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW06\020\272\020\022#" + "\n\036KPISAMPLETYPE_INT_HOP_LAT_SW07\020\273\020\022#\n\036K" + "PISAMPLETYPE_INT_HOP_LAT_SW08\020\274\020\022#\n\036KPIS" + "AMPLETYPE_INT_HOP_LAT_SW09\020\275\020\022#\n\036KPISAMP" + "LETYPE_INT_HOP_LAT_SW10\020\276\020\022#\n\036KPISAMPLET" + "YPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031KPISAMPLETYPE" + "_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLETYPE_INT_DRO" + "P_REASON\020\232\021b\006proto3" }; + java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\302\016\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + 
"\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022,\n\'KPISA" + "MPLETYPE_OPTICAL_POWER_TOTAL_INPUT\020\366\003\022,\n" + "\'KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER" + "\020\367\003\022)\n$KPISAMPLETYPE_L3_UNIQUE_ATTACK_CO" + "NNS\020\331\004\022*\n%KPISAMPLETYPE_L3_TOTAL_DROPPED" + "_PACKTS\020\332\004\022&\n!KPISAMPLETYPE_L3_UNIQUE_AT" + "TACKERS\020\333\004\0220\n+KPISAMPLETYPE_L3_UNIQUE_CO" + "MPROMISED_CLIENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3" + "_SECURITY_STATUS_CRYPTO\020\335\004\022%\n KPISAMPLET" + "YPE_SERVICE_LATENCY_MS\020\275\005\0221\n,KPISAMPLETY" + "PE_PACKETS_TRANSMITTED_AGG_OUTPUT\020\315\010\022.\n)" + "KPISAMPLETYPE_PACKETS_RECEIVED_AGG_OUTPU" + "T\020\316\010\022-\n(KPISAMPLETYPE_PACKETS_DROPPED_AG" + "G_OUTPUT\020\317\010\022/\n*KPISAMPLETYPE_BYTES_TRANS" + "MITTED_AGG_OUTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BY" + "TES_RECEIVED_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLET" + "YPE_BYTES_DROPPED_AGG_OUTPUT\020\263\t\0220\n+KPISA" + "MPLETYPE_SERVICE_LATENCY_MS_AGG_OUTPUT\020\245" + "\r\022\036\n\031KPISAMPLETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPI" + "SAMPLETYPE_INT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYP" + "E_INT_TS_EGR\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP" + "_LAT\020\324\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_ING" + "\020\325\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_EGR\020\326\017\022" + "\"\n\035KPISAMPLETYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032K" + "PISAMPLETYPE_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPL" + "ETYPE_INT_HOP_LAT_SW01\020\265\020\022#\n\036KPISAMPLETY" + "PE_INT_HOP_LAT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_" + 
"INT_HOP_LAT_SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT" + "_HOP_LAT_SW04\020\270\020\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW05\020\271\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW06\020\272\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW07\020\273\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "8\020\274\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW09\020\275" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW10\020\276\020\022#" + "\n\036KPISAMPLETYPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031K" + "PISAMPLETYPE_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLE" + "TYPE_INT_DROP_REASON\020\232\021b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); } // @@protoc_insertion_point(outer_class_scope) diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml index a83d03403..a997d8582 100644 --- a/src/policy/target/kubernetes/kubernetes.yml +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -3,8 +3,8 @@ apiVersion: v1 kind: Service metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-21 - 15:35:10 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:42:32 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -37,8 +37,8 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-21 - 15:35:10 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:42:32 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -57,8 +57,8 @@ spec: template: metadata: annotations: - app.quarkus.io/commit-id: 
9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-21 - 15:35:10 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:42:32 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -75,14 +75,14 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: SERVICE_SERVICE_HOST + value: serviceservice + - name: MONITORING_SERVICE_HOST + value: monitoringservice - name: KAFKA_BROKER_HOST value: kafka-public.kafka.svc.cluster.local - name: CONTEXT_SERVICE_HOST value: contextservice - - name: MONITORING_SERVICE_HOST - value: monitoringservice - - name: SERVICE_SERVICE_HOST - value: serviceservice image: labs.etsi.org:5050/tfs/controller/policy:0.1.0 imagePullPolicy: Always livenessProbe: diff --git a/src/tests/ofc26_flexscale/my_deploy_ofc26.sh b/src/tests/ofc26_flexscale/my_deploy_ofc26.sh index 540a57646..5c9282e9f 100755 --- a/src/tests/ofc26_flexscale/my_deploy_ofc26.sh +++ b/src/tests/ofc26_flexscale/my_deploy_ofc26.sh @@ -146,7 +146,7 @@ export CRDB_DEPLOY_MODE="single" export CRDB_DROP_DATABASE_IF_EXISTS="YES" # Disable flag for re-deploying CockroachDB from scratch. 
-export CRDB_REDEPLOY="" +export CRDB_REDEPLOY="YES" # ----- NATS ------------------------------------------------------------------- diff --git a/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java b/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java index 0c98ddbb4..3f4a7a2a6 100644 --- a/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java +++ b/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java @@ -71,6 +71,14 @@ public final class KpiSampleTypes { * KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; */ KPISAMPLETYPE_OPTICAL_SECURITY_STATUS(501), + /** + * KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT(502), + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER(503), /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -280,6 +288,16 @@ public final class KpiSampleTypes { */ public static final int KPISAMPLETYPE_OPTICAL_SECURITY_STATUS_VALUE = 501; + /** + * KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + public static final int KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT_VALUE = 502; + + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + public static final int KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER_VALUE = 503; + /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -503,6 +521,10 @@ public final class KpiSampleTypes { return KPISAMPLETYPE_ML_CONFIDENCE; case 501: return KPISAMPLETYPE_OPTICAL_SECURITY_STATUS; + case 502: + return KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT; + case 503: + return KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER; case 601: return KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS; case 602: @@ -628,7 +650,7 @@ public final class KpiSampleTypes { private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { - java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + 
"es*\346\r\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022)\n$KPISA" + "MPLETYPE_L3_UNIQUE_ATTACK_CONNS\020\331\004\022*\n%KP" + "ISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS\020\332\004\022&" + "\n!KPISAMPLETYPE_L3_UNIQUE_ATTACKERS\020\333\004\0220" + "\n+KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CL" + "IENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3_SECURITY_ST" + "ATUS_CRYPTO\020\335\004\022%\n KPISAMPLETYPE_SERVICE_" + "LATENCY_MS\020\275\005\0221\n,KPISAMPLETYPE_PACKETS_T" + "RANSMITTED_AGG_OUTPUT\020\315\010\022.\n)KPISAMPLETYP" + "E_PACKETS_RECEIVED_AGG_OUTPUT\020\316\010\022-\n(KPIS" + "AMPLETYPE_PACKETS_DROPPED_AGG_OUTPUT\020\317\010\022" + "/\n*KPISAMPLETYPE_BYTES_TRANSMITTED_AGG_O" + "UTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BYTES_RECEIVED" + "_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLETYPE_BYTES_DR" + "OPPED_AGG_OUTPUT\020\263\t\0220\n+KPISAMPLETYPE_SER" + "VICE_LATENCY_MS_AGG_OUTPUT\020\245\r\022\036\n\031KPISAMP" + "LETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPISAMPLETYPE_I" + "NT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYPE_INT_TS_EGR" + "\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP_LAT\020\324\017\022\"\n\035K" + "PISAMPLETYPE_INT_PORT_ID_ING\020\325\017\022\"\n\035KPISA" + "MPLETYPE_INT_PORT_ID_EGR\020\326\017\022\"\n\035KPISAMPLE" + "TYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032KPISAMPLETYPE" + "_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPLETYPE_INT_HO" + 
"P_LAT_SW01\020\265\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "4\020\270\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW05\020\271" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW06\020\272\020\022#" + "\n\036KPISAMPLETYPE_INT_HOP_LAT_SW07\020\273\020\022#\n\036K" + "PISAMPLETYPE_INT_HOP_LAT_SW08\020\274\020\022#\n\036KPIS" + "AMPLETYPE_INT_HOP_LAT_SW09\020\275\020\022#\n\036KPISAMP" + "LETYPE_INT_HOP_LAT_SW10\020\276\020\022#\n\036KPISAMPLET" + "YPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031KPISAMPLETYPE" + "_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLETYPE_INT_DRO" + "P_REASON\020\232\021b\006proto3" }; + java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\302\016\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022,\n\'KPISA" + "MPLETYPE_OPTICAL_POWER_TOTAL_INPUT\020\366\003\022,\n" + "\'KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER" + "\020\367\003\022)\n$KPISAMPLETYPE_L3_UNIQUE_ATTACK_CO" + "NNS\020\331\004\022*\n%KPISAMPLETYPE_L3_TOTAL_DROPPED" + "_PACKTS\020\332\004\022&\n!KPISAMPLETYPE_L3_UNIQUE_AT" + "TACKERS\020\333\004\0220\n+KPISAMPLETYPE_L3_UNIQUE_CO" + "MPROMISED_CLIENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3" + "_SECURITY_STATUS_CRYPTO\020\335\004\022%\n KPISAMPLET" + "YPE_SERVICE_LATENCY_MS\020\275\005\0221\n,KPISAMPLETY" + 
"PE_PACKETS_TRANSMITTED_AGG_OUTPUT\020\315\010\022.\n)" + "KPISAMPLETYPE_PACKETS_RECEIVED_AGG_OUTPU" + "T\020\316\010\022-\n(KPISAMPLETYPE_PACKETS_DROPPED_AG" + "G_OUTPUT\020\317\010\022/\n*KPISAMPLETYPE_BYTES_TRANS" + "MITTED_AGG_OUTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BY" + "TES_RECEIVED_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLET" + "YPE_BYTES_DROPPED_AGG_OUTPUT\020\263\t\0220\n+KPISA" + "MPLETYPE_SERVICE_LATENCY_MS_AGG_OUTPUT\020\245" + "\r\022\036\n\031KPISAMPLETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPI" + "SAMPLETYPE_INT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYP" + "E_INT_TS_EGR\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP" + "_LAT\020\324\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_ING" + "\020\325\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_EGR\020\326\017\022" + "\"\n\035KPISAMPLETYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032K" + "PISAMPLETYPE_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPL" + "ETYPE_INT_HOP_LAT_SW01\020\265\020\022#\n\036KPISAMPLETY" + "PE_INT_HOP_LAT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_" + "INT_HOP_LAT_SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT" + "_HOP_LAT_SW04\020\270\020\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW05\020\271\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW06\020\272\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW07\020\273\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "8\020\274\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW09\020\275" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW10\020\276\020\022#" + "\n\036KPISAMPLETYPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031K" + "PISAMPLETYPE_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLE" + "TYPE_INT_DROP_REASON\020\232\021b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); } // @@protoc_insertion_point(outer_class_scope) diff --git a/src/ztp/target/kubernetes/kubernetes.yml b/src/ztp/target/kubernetes/kubernetes.yml index 9d2196ea6..da7022906 100644 --- 
a/src/ztp/target/kubernetes/kubernetes.yml +++ b/src/ztp/target/kubernetes/kubernetes.yml @@ -3,8 +3,8 @@ apiVersion: v1 kind: Service metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-24 - 06:37:09 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:41:09 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -16,10 +16,6 @@ metadata: name: ztp spec: ports: - - name: https - port: 443 - protocol: TCP - targetPort: 8443 - name: http port: 80 protocol: TCP @@ -28,6 +24,10 @@ spec: port: 9000 protocol: TCP targetPort: 9000 + - name: https + port: 443 + protocol: TCP + targetPort: 8443 selector: app.kubernetes.io/name: ztp app.kubernetes.io/version: 0.2.0 @@ -37,8 +37,8 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-24 - 06:37:09 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:41:09 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -57,8 +57,8 @@ spec: template: metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-24 - 06:37:09 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:41:09 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -74,7 +74,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: ubuntu/ztp:0.2.0 + image: tfs/ztp:0.2.0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 @@ -88,15 +88,15 @@ spec: timeoutSeconds: 10 name: ztp ports: - - containerPort: 8443 - name: https - protocol: TCP - 
containerPort: 8080 name: http protocol: TCP - containerPort: 9000 name: grpc protocol: TCP + - containerPort: 8443 + name: https + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: -- GitLab From c2952cca04bc961c83c0d5d349361f82900b768a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:14:57 +0000 Subject: [PATCH 28/41] Pre-merge code cleanup --- my_deploy.sh | 4 +- ofc26.sh | 229 ------------------ .../{my_deploy_ofc26.sh => deploy_specs.sh} | 0 3 files changed, 2 insertions(+), 231 deletions(-) delete mode 100644 ofc26.sh rename src/tests/ofc26_flexscale/{my_deploy_ofc26.sh => deploy_specs.sh} (100%) diff --git a/my_deploy.sh b/my_deploy.sh index ac9167832..24e1f6902 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -20,7 +20,7 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui" +export TFS_COMPONENTS="context device pathcomp service nbi webui" # Uncomment to activate Monitoring (old) #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" @@ -143,7 +143,7 @@ export CRDB_PASSWORD="tfs123" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="YES" +export CRDB_DROP_DATABASE_IF_EXISTS="" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" diff --git a/ofc26.sh b/ofc26.sh deleted file mode 100644 index e6926b64e..000000000 --- a/ofc26.sh +++ /dev/null @@ -1,229 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# ----- TeraFlowSDN ------------------------------------------------------------ - -# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" - -# Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui" - -# Uncomment to activate Monitoring (old) -#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" - -# Uncomment to activate Monitoring Framework (new) -export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" - -# Uncomment to activate QoS Profiles -#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" - -# Uncomment to activate BGP-LS Speaker -#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" - -# Uncomment to activate Optical Controller -# To manage optical connections, "service" requires "opticalcontroller" to be deployed -# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the -# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
-#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then -# BEFORE="${TFS_COMPONENTS% service*}" -# AFTER="${TFS_COMPONENTS#* service}" -# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" -#fi - -# Uncomment to activate ZTP -#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" - -# Uncomment to activate Policy Manager -#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" - -# Uncomment to activate Optical CyberSecurity -#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" - -# Uncomment to activate L3 CyberSecurity -#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" - -# Uncomment to activate TE -#export TFS_COMPONENTS="${TFS_COMPONENTS} te" - -# Uncomment to activate Forecaster -#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" - -# Uncomment to activate E2E Orchestrator -#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" - -# Uncomment to activate VNT Manager -#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" - -# Uncomment to activate OSM Client -#export TFS_COMPONENTS="${TFS_COMPONENTS} osm_client" - -# Uncomment to activate DLT and Interdomain -#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" -#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then -# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" -# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" -# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" -#fi - -# Uncomment to activate QKD App -# To manage QKD Apps, "service" requires "qkd_app" to be deployed -# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the -# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. 
-#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then -# BEFORE="${TFS_COMPONENTS% service*}" -# AFTER="${TFS_COMPONENTS#* service}" -# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" -#fi - -# Uncomment to activate SIMAP Connector -#export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" - -# Uncomment to activate Load Generator -#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" - - -# Set the tag you want to use for your images. -export TFS_IMAGE_TAG="dev" - -# Set the name of the Kubernetes namespace to deploy TFS to. -export TFS_K8S_NAMESPACE="tfs" - -# Set additional manifest files to be applied after the deployment -export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" - -# Uncomment to monitor performance of components -#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" - -# Uncomment when deploying Optical CyberSecurity -#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" - -# Set the new Grafana admin password -export TFS_GRAFANA_PASSWORD="admin123+" - -# Disable skip-build flag to rebuild the Docker images. -export TFS_SKIP_BUILD="" - - -# ----- CockroachDB ------------------------------------------------------------ - -# Set the namespace where CockroackDB will be deployed. -export CRDB_NAMESPACE="crdb" - -# Set the external port CockroackDB Postgre SQL interface will be exposed to. -export CRDB_EXT_PORT_SQL="26257" - -# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. -export CRDB_EXT_PORT_HTTP="8081" - -# Set the database username to be used by Context. -export CRDB_USERNAME="tfs" - -# Set the database user's password to be used by Context. -export CRDB_PASSWORD="tfs123" - -# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. -# See ./deploy/all.sh or ./deploy/crdb.sh for additional details -export CRDB_DEPLOY_MODE="single" - -# Disable flag for dropping database, if it exists. 
-export CRDB_DROP_DATABASE_IF_EXISTS="YES" - -# Disable flag for re-deploying CockroachDB from scratch. -export CRDB_REDEPLOY="" - - -# ----- NATS ------------------------------------------------------------------- - -# Set the namespace where NATS will be deployed. -export NATS_NAMESPACE="nats" - -# Set the external port NATS Client interface will be exposed to. -export NATS_EXT_PORT_CLIENT="4222" - -# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. -export NATS_EXT_PORT_HTTP="8222" - -# Set NATS installation mode to 'single'. This option is convenient for development and testing. -# See ./deploy/all.sh or ./deploy/nats.sh for additional details -export NATS_DEPLOY_MODE="single" - -# Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="" - - -# ----- Apache Kafka ----------------------------------------------------------- - -# Set the namespace where Apache Kafka will be deployed. -export KFK_NAMESPACE="kafka" - -# Set the port Apache Kafka server will be exposed to. -export KFK_EXT_PORT_CLIENT="9092" - -# Set Kafka installation mode to 'single'. This option is convenient for development and testing. -# See ./deploy/all.sh or ./deploy/kafka.sh for additional details -export KFK_DEPLOY_MODE="single" - -# Disable flag for re-deploying Kafka from scratch. -export KFK_REDEPLOY="" - - -# ----- QuestDB ---------------------------------------------------------------- - -# Set the namespace where QuestDB will be deployed. -export QDB_NAMESPACE="qdb" - -# Set the external port QuestDB Postgre SQL interface will be exposed to. -export QDB_EXT_PORT_SQL="8812" - -# Set the external port QuestDB Influx Line Protocol interface will be exposed to. -export QDB_EXT_PORT_ILP="9009" - -# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. -export QDB_EXT_PORT_HTTP="9000" - -# Set the database username to be used for QuestDB. -export QDB_USERNAME="admin" - -# Set the database user's password to be used for QuestDB. 
-export QDB_PASSWORD="quest" - -# Set the table name to be used by Monitoring for KPIs. -export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" - -# Set the table name to be used by Slice for plotting groups. -export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" - -# Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="" - -# Disable flag for re-deploying QuestDB from scratch. -export QDB_REDEPLOY="" - - -# ----- K8s Observability ------------------------------------------------------ - -# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. -export PROM_EXT_PORT_HTTP="9090" - -# Set the external port Grafana HTTP Dashboards will be exposed to. -export GRAF_EXT_PORT_HTTP="3000" - - -# ----- Telemetry Config ------------------------------------------------------ - -# Define a Load Balancer IP for Telemetry Collector components -export LOAD_BALANCER_IP="192.168.5.250" # <-- Change this to match your network diff --git a/src/tests/ofc26_flexscale/my_deploy_ofc26.sh b/src/tests/ofc26_flexscale/deploy_specs.sh similarity index 100% rename from src/tests/ofc26_flexscale/my_deploy_ofc26.sh rename to src/tests/ofc26_flexscale/deploy_specs.sh -- GitLab From 8387d16f46db041f71ac801689292f722a06c533 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:22:50 +0000 Subject: [PATCH 29/41] Kafka deployment: - Fix kafka service name --- src/common/tools/kafka/Variables.py | 2 +- src/policy/src/main/resources/application.yml | 2 +- src/policy/target/kubernetes/kubernetes.yml | 2 +- src/tests/ofc26_flexscale/run_ofc26_test.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index dd3d9e07b..5a8e68215 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -19,7 +19,7 @@ from kafka.errors import TopicAlreadyExistsError from common.Settings import get_setting LOGGER = 
logging.getLogger(__name__) -KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-public.{:s}.svc.cluster.local:{:s}' +KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}' KAFKA_TOPIC_NUM_PARTITIONS = 1 KAFKA_TOPIC_REPLICATION_FACTOR = 1 diff --git a/src/policy/src/main/resources/application.yml b/src/policy/src/main/resources/application.yml index 7292f222a..ccfbffdf5 100644 --- a/src/policy/src/main/resources/application.yml +++ b/src/policy/src/main/resources/application.yml @@ -63,7 +63,7 @@ quarkus: context-service-host: "contextservice" monitoring-service-host: "monitoringservice" service-service-host: "serviceservice" - kafka-broker-host: "kafka-public.kafka.svc.cluster.local" + kafka-broker-host: "kafka-service.kafka.svc.cluster.local" resources: requests: cpu: 50m diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml index a997d8582..2af0c3587 100644 --- a/src/policy/target/kubernetes/kubernetes.yml +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -80,7 +80,7 @@ spec: - name: MONITORING_SERVICE_HOST value: monitoringservice - name: KAFKA_BROKER_HOST - value: kafka-public.kafka.svc.cluster.local + value: kafka-service.kafka.svc.cluster.local - name: CONTEXT_SERVICE_HOST value: contextservice image: labs.etsi.org:5050/tfs/controller/policy:0.1.0 diff --git a/src/tests/ofc26_flexscale/run_ofc26_test.sh b/src/tests/ofc26_flexscale/run_ofc26_test.sh index 9d29118ce..b437cc90f 100755 --- a/src/tests/ofc26_flexscale/run_ofc26_test.sh +++ b/src/tests/ofc26_flexscale/run_ofc26_test.sh @@ -23,7 +23,7 @@ export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt #added for kafka exposure export KFK_SERVER_ADDRESS='127.0.0.1:9094' -kubectl port-forward -n kafka service/kafka-public 9094:9094 > /dev/null 2>&1 & +kubectl port-forward -n kafka service/kafka-service 9094:9094 > /dev/null 2>&1 & KAFKA_PF_PID=$! 
# Function to cleanup port-forward on exit -- GitLab From 81d8c3a912f6443c1927989ac1b95739597eed87 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:29:50 +0000 Subject: [PATCH 30/41] Kafka deployment: - Fix kafka service port --- manifests/kafka/single-node.yaml | 2 +- scripts/run_tests_locally-analytics-backend.sh | 4 ++-- scripts/run_tests_locally-analytics-frontend.sh | 2 +- scripts/run_tests_locally-telemetry-gnmi.sh | 2 +- .../tests/gnmi_oc/test_integration_GnmiOCcollector.py | 2 +- src/tests/ofc26_flexscale/run_ofc26_test.sh | 6 +++--- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index 6eb9bd03c..af0d67216 100644 --- a/manifests/kafka/single-node.yaml +++ b/manifests/kafka/single-node.yaml @@ -83,7 +83,7 @@ spec: - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP value: "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT" - name: KAFKA_CFG_ADVERTISED_LISTENERS - value: "PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9094" + value: "PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9092" # local clients use kubectl port-forward ... 
9092:external - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES value: "CONTROLLER" - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS diff --git a/scripts/run_tests_locally-analytics-backend.sh b/scripts/run_tests_locally-analytics-backend.sh index 44e26bacb..fae768939 100755 --- a/scripts/run_tests_locally-analytics-backend.sh +++ b/scripts/run_tests_locally-analytics-backend.sh @@ -19,7 +19,7 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc -export KFK_SERVER_ADDRESS='127.0.0.1:9094' +export KFK_SERVER_ADDRESS='127.0.0.1:9092' CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" @@ -28,4 +28,4 @@ export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytic # analytics/backend/tests/test_backend.py python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ - analytics/backend/tests/test_backend.py::test_start_analytics_backend_for_mgon_agent \ No newline at end of file + analytics/backend/tests/test_backend.py::test_start_analytics_backend_for_mgon_agent diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh index 4d999f625..4a90ec060 100755 --- a/scripts/run_tests_locally-analytics-frontend.sh +++ b/scripts/run_tests_locally-analytics-frontend.sh @@ -19,7 +19,7 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc -export KFK_SERVER_ADDRESS='127.0.0.1:9094' +export KFK_SERVER_ADDRESS='127.0.0.1:9092' CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" diff --git a/scripts/run_tests_locally-telemetry-gnmi.sh b/scripts/run_tests_locally-telemetry-gnmi.sh index 965141e56..6825e42a5 100755 --- 
a/scripts/run_tests_locally-telemetry-gnmi.sh +++ b/scripts/run_tests_locally-telemetry-gnmi.sh @@ -17,7 +17,7 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc -export KFK_SERVER_ADDRESS='127.0.0.1:9094' +export KFK_SERVER_ADDRESS='127.0.0.1:9092' # This is unit test (should be tested with container-lab running) python3 -m pytest --log-level=info --log-cli-level=info --verbose \ diff --git a/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py b/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py index 6543c8743..d200fc0b7 100644 --- a/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py @@ -164,7 +164,7 @@ def telemetry_backend_service(): # + Uncomment test_add_to_topology() in helper methods section to add a device. # - A KPI Descriptor must be added in KPI DB with correct device_id. # + Uncomment test_SetKpiDescriptor() in helper methods section to add a KPI Descriptor. - # - Kafka should be exposed externally 'kubectl port-forward -n kafka service/kafka-service 9094:9094'. + # - Kafka should be exposed externally 'kubectl port-forward -n kafka service/kafka-service 9092:9092'. 
def test_helper_get_collector_by_kpi_id(kpi_manager_client, context_client): LOGGER.info("Testing get_collector_by_kpi_id...") diff --git a/src/tests/ofc26_flexscale/run_ofc26_test.sh b/src/tests/ofc26_flexscale/run_ofc26_test.sh index b437cc90f..2644320f5 100755 --- a/src/tests/ofc26_flexscale/run_ofc26_test.sh +++ b/src/tests/ofc26_flexscale/run_ofc26_test.sh @@ -21,9 +21,9 @@ CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" #added for kafka exposure -export KFK_SERVER_ADDRESS='127.0.0.1:9094' +export KFK_SERVER_ADDRESS='127.0.0.1:9092' -kubectl port-forward -n kafka service/kafka-service 9094:9094 > /dev/null 2>&1 & +kubectl port-forward -n kafka service/kafka-service 9092:9092 > /dev/null 2>&1 & KAFKA_PF_PID=$! # Function to cleanup port-forward on exit @@ -44,4 +44,4 @@ echo "Telemetry Frontend Service IP: ${IP_TELE}" python -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ - tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py \ No newline at end of file + tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py -- GitLab From b0c99012a3dd8e9944bcfb3afd3b7d2e22c88173 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:39:09 +0000 Subject: [PATCH 31/41] Pre-merge code cleanup --- .../service/task_scheduler/TaskScheduler.py | 223 +++++++++--------- 1 file changed, 106 insertions(+), 117 deletions(-) diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index c9eaff31c..a9a1a20e7 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -307,9 +307,8 @@ class TasksScheduler: else : has_optical_band = True return (has_media_channel, has_optical_band) - - - + + def compose_from_opticalcontroller_reply( self, pathcomp_reply : PathCompReply, is_delete : bool = False ) -> None: @@ 
-322,50 +321,48 @@ class TasksScheduler: has_optical_band = None for service in pathcomp_reply.services: + connections = self._context_client.ListConnections(service.service_id) + has_media_channel, has_optical_band = self.check_service_for_media_channel( + connections=connections, item=service.service_id + ) + + include_service( + service.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) + self._add_service_to_executor_cache(service) - connections = self._context_client.ListConnections(service.service_id) - has_media_channel, has_optical_band = self.check_service_for_media_channel( - connections=connections, item=service.service_id - ) - - - include_service(service.service_id , has_media_channel=has_media_channel, has_optical_band=has_optical_band) - self._add_service_to_executor_cache(service) - - for connection in connections.connections: - self._add_connection_to_executor_cache(connection) - - + for connection in connections.connections: + self._add_connection_to_executor_cache(connection) for connection in pathcomp_reply.connections: - - connection_key = include_connection( - connection.connection_id, connection.service_id, has_media_channel=has_media_channel, - has_optical_band=has_optical_band - ) - self._add_connection_to_executor_cache(connection) + connection_key = include_connection( + connection.connection_id, connection.service_id, + has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) + self._add_connection_to_executor_cache(connection) - self._executor.get_service(connection.service_id) - for sub_service_id in connection.sub_service_ids: - _,service_key_done = include_service( - sub_service_id, has_media_channel=has_media_channel, - has_optical_band=has_optical_band - ) - self._executor.get_service(sub_service_id) - self._dag.add(connection_key, service_key_done) + self._executor.get_service(connection.service_id) + for sub_service_id in connection.sub_service_ids: + _,service_key_done = 
include_service( + sub_service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) + self._executor.get_service(sub_service_id) + self._dag.add(connection_key, service_key_done) t1 = time.time() LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) - - - + + def compose_from_service_expansion( self, service :Service, ) -> None: t0 = time.time() include_service = self._optical_service_create include_connection = self._optical_connection_configure - + logging.debug(f"after setting the config {service}") #pending_items_to_explore.put(service) has_media_channel = None @@ -373,17 +370,16 @@ class TasksScheduler: if service is None : raise NotFoundException('Service', service, extra_details=[ 'service not found ' ]) - - - + connections = self._context_client.ListConnections(service.service_id) has_media_channel, has_optical_band = self.check_service_for_media_channel( connections=connections, item=service.service_id ) - - _,service_key_done= include_service(service.service_id , - has_media_channel=has_media_channel, - has_optical_band=has_optical_band) + + _,service_key_done= include_service( + service.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) # self._add_service_to_executor_cache(service) service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus( self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING @@ -391,7 +387,8 @@ class TasksScheduler: self._add_service_to_executor_cache(service) for connection in connections.connections: connection_key = include_connection( - connection.connection_id, connection.service_id, has_media_channel=has_media_channel, + connection.connection_id, connection.service_id, + has_media_channel=has_media_channel, has_optical_band=has_optical_band ) self._add_connection_to_executor_cache(connection) @@ -399,7 +396,9 @@ class TasksScheduler: t1 = time.time() LOGGER.debug('[compose_from_service] elapsed_time: {:f} 
sec'.format(t1-t0)) - def compose_from_optical_service(self, service : Service, params:dict, is_delete : bool = False) -> None: + def compose_from_optical_service( + self, service : Service, params:dict, is_delete : bool = False + ) -> None: t0 = time.time() include_service = self._optical_service_remove if is_delete else self._service_create include_connection = self._optical_connection_deconfigure if is_delete else self._connection_configure @@ -408,127 +407,118 @@ class TasksScheduler: explored_items = set() pending_items_to_explore = queue.Queue() pending_items_to_explore.put(service) - has_media_channel=None - has_optical_band=None - reply=None - code=0 - reply_not_allowed="DELETE_NOT_ALLOWED" + has_media_channel = None + has_optical_band = None + reply = None + code = 0 + reply_not_allowed = "DELETE_NOT_ALLOWED" while not pending_items_to_explore.empty(): try: item = pending_items_to_explore.get(block=False) - except queue.Empty: break - + if isinstance(item, Service): - str_item_key = grpc_message_to_json_string(item.service_id) if str_item_key in explored_items: continue connections = self._context_client.ListConnections(item.service_id) - has_media_channel,has_optical_band=self.check_service_for_media_channel(connections=connections,item=item.service_id) - oc_type = 1 + has_media_channel, has_optical_band = self.check_service_for_media_channel( + connections=connections, item=item.service_id + ) + oc_type = 1 if len(service.service_config.config_rules) > 0: for constraint in service.service_constraints: if "type" in constraint.custom.constraint_type: oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) - if oc_type == 2 : - reply,code = delete_lightpath( - params['src'] - , params ['dst'] - , params['bitrate'] - , flow_id= params['flow_id'] - ) - else : - reply,code = DelFlexLightpath( - params['src'] - , params ['dst'] - , params['bitrate'] - , params['ob_id'] - , flow_id=params['flow_id'] - ) + if oc_type == 2: + reply, code = 
delete_lightpath( + params['src'], params ['dst'], params['bitrate'], + flow_id= params['flow_id'] + ) + else: + reply, code = DelFlexLightpath( + params['src'], params ['dst'], params['bitrate'], + params['ob_id'], flow_id=params['flow_id'] + ) if code == 400 and reply_not_allowed in reply : MSG = 'Deleteion for the service is not Allowed , Served Lightpaths is not empty' raise Exception(MSG) - include_service(item.service_id,has_media_channel=has_media_channel,has_optical_band=has_optical_band) + include_service( + item.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) self._add_service_to_executor_cache(item) - - + for connection in connections.connections: - self._add_connection_to_executor_cache(connection) - pending_items_to_explore.put(connection) + self._add_connection_to_executor_cache(connection) + pending_items_to_explore.put(connection) explored_items.add(str_item_key) - elif isinstance(item, ServiceId): - - if code == 400 and reply_not_allowed in reply:break - + if code == 400 and reply_not_allowed in reply: break + str_item_key = grpc_message_to_json_string(item) if str_item_key in explored_items: continue connections = self._context_client.ListConnections(item) - has_media_channel,has_optical_band=self.check_service_for_media_channel(connections=connections,item=item) + has_media_channel, has_optical_band = self.check_service_for_media_channel( + connections=connections, item=item + ) + + include_service( + item, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) - - include_service(item,has_media_channel=has_media_channel,has_optical_band=has_optical_band) - - self._executor.get_service(item) - + for connection in connections.connections: - self._add_connection_to_executor_cache(connection) pending_items_to_explore.put(connection) - + explored_items.add(str_item_key) elif isinstance(item, Connection): if code == 400 and reply_not_allowed in reply:break str_item_key = 
grpc_message_to_json_string(item.connection_id) if str_item_key in explored_items: continue - - - connection_key = include_connection( item.connection_id - , item.service_id - , has_media_channel=has_media_channel - , has_optical_band=has_optical_band ) + + connection_key = include_connection( + item.connection_id, item.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) self._add_connection_to_executor_cache(connection) - + if include_service_config is not None : - connections_list = ConnectionList() - connections_list.connections.append(item) - - is_media_channel,_=self.check_service_for_media_channel(connections=connections_list,item=service) - - if has_optical_band and is_media_channel: - include_service_config(item.connection_id - , item.service_id ) - + connections_list = ConnectionList() + connections_list.connections.append(item) + + is_media_channel,_=self.check_service_for_media_channel( + connections=connections_list,item=service + ) + + if has_optical_band and is_media_channel: + include_service_config(item.connection_id, item.service_id) self._executor.get_service(item.service_id) pending_items_to_explore.put(item.service_id) - - + for sub_service_id in item.sub_service_ids: - _,service_key_done = include_service(sub_service_id - ,has_media_channel=has_media_channel - ,has_optical_band=has_optical_band) + _,service_key_done = include_service( + sub_service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) self._executor.get_service(sub_service_id) self._dag.add(service_key_done, connection_key) pending_items_to_explore.put(sub_service_id) - - explored_items.add(str_item_key) - - else: MSG = 'Unsupported item {:s}({:s})' raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item))) - - t1 = time.time() - LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) + t1 = time.time() + LOGGER.debug('[compose_from_optical_service] elapsed_time: {:f} 
sec'.format(t1-t0)) def compose_from_service(self, service : Service, is_delete : bool = False) -> None: @@ -641,13 +631,12 @@ class TasksScheduler: self._dag.add(service_active_key, new_connection_configure_key) t1 = time.time() - LOGGER.debug('[RRERRSF] elapsed_time: {:f} sec'.format(t1-t0)) + LOGGER.debug('[compose_optical_service_update] elapsed_time: {:f} sec'.format(t1-t0)) def compose_optical_service_update1( self, service : Service, old_connection : Connection, new_connection : Connection ) -> None: - LOGGER.debug('[ttttttttttt] elapsed_time inside update1') t0 = time.time() self._add_service_to_executor_cache(service) @@ -686,7 +675,7 @@ class TasksScheduler: self._dag.add(service_active_key, new_connection_configure_key) t1 = time.time() - LOGGER.debug('[RRERRSF] elapsed_time: {:f} sec'.format(t1-t0)) + LOGGER.debug('[compose_optical_service_update1] elapsed_time: {:f} sec'.format(t1-t0)) def compose_service_connection_update( @@ -743,7 +732,7 @@ class TasksScheduler: task = self._tasks.get(task_key) succeeded = True if dry_run else task.execute() results.append(succeeded) - LOGGER.debug('[execute_allRRRR] finished task {:s} ; succeeded={:s}'.format(str_task_name, str(succeeded))) + LOGGER.debug('[execute_all] finished task {:s} ; succeeded={:s}'.format(str_task_name, str(succeeded))) LOGGER.debug('[execute_all] results={:s}'.format(str(results))) return zip(ordered_task_keys, results) -- GitLab From 354e02de1f13a762139a614f8c12da95859aad2b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:41:49 +0000 Subject: [PATCH 32/41] Kafka deployment: - Fix kafka service port --- manifests/kafka/single-node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index af0d67216..ead717849 100644 --- a/manifests/kafka/single-node.yaml +++ b/manifests/kafka/single-node.yaml @@ -83,7 +83,7 @@ spec: - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP value: 
"PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT" - name: KAFKA_CFG_ADVERTISED_LISTENERS - value: "PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9092" # local clients use kubectl port-forward ... 9092:external + value: "PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9094" # local clients use kubectl port-forward ... 9092:external - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES value: "CONTROLLER" - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS -- GitLab From 6b70c5f9edca49721e310a94e71170538bbdc577 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:41:56 +0000 Subject: [PATCH 33/41] Pre-merge code cleanup --- src/analytics/backend/service/Streamer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/analytics/backend/service/Streamer.py b/src/analytics/backend/service/Streamer.py index ece1107cf..a8c9bffc6 100644 --- a/src/analytics/backend/service/Streamer.py +++ b/src/analytics/backend/service/Streamer.py @@ -25,13 +25,14 @@ from analytics.backend.service.AnalyzerHelper import AnalyzerHelper logger = logging.getLogger(__name__) class DaskStreamer(threading.Thread): - def __init__(self, key, input_kpis, output_kpis, thresholds, + def __init__( + self, key, input_kpis, output_kpis, thresholds, batch_size = 5, batch_duration = None, window_size = None, cluster_instance = None, producer_instance = AnalyzerHelper.initialize_kafka_producer() - ) -> None: + ) -> None: super().__init__() self.key = key self.input_kpis = input_kpis -- GitLab From a6a914c9b7675563bc97796c39d01b27347bd223 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:54:16 +0000 Subject: [PATCH 34/41] Automation component - Plugins: - Added pllugin for optical connections - Fixed filter fields --- .../zsm_handler_api/ZSMFilterFields.py | 10 +- .../service/zsm_handlers/OpticalZSMPlugin.py | 104 ++++++++++++++++++ .../service/zsm_handlers/P4INTZSMPlugin.py | 6 +- 
.../service/zsm_handlers/__init__.py | 7 ++ 4 files changed, 120 insertions(+), 7 deletions(-) create mode 100644 src/automation/service/zsm_handlers/OpticalZSMPlugin.py diff --git a/src/automation/service/zsm_handler_api/ZSMFilterFields.py b/src/automation/service/zsm_handler_api/ZSMFilterFields.py index ab91e1bc5..e6c2fb844 100644 --- a/src/automation/service/zsm_handler_api/ZSMFilterFields.py +++ b/src/automation/service/zsm_handler_api/ZSMFilterFields.py @@ -16,19 +16,21 @@ from enum import Enum from common.proto.context_pb2 import ServiceTypeEnum class ZSMFilterFieldEnum(Enum): - TARGET_SERVICE_TYPE = 'target_service_type' + TARGET_SERVICE_TYPE = 'target_service_type' TELEMETRY_SERVICE_TYPE = 'telemetry_service_type' TARGET_SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, } TELEMETRY_SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY + ServiceTypeEnum.SERVICETYPE_INT, + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, } # Maps filter fields to allowed values per Filter field. # If no restriction (free text) None is specified ZSM_FILTER_FIELD_ALLOWED_VALUES = { - ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value : TARGET_SERVICE_TYPE_VALUES, + ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value : TARGET_SERVICE_TYPE_VALUES, ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE.value : TELEMETRY_SERVICE_TYPE_VALUES, } diff --git a/src/automation/service/zsm_handlers/OpticalZSMPlugin.py b/src/automation/service/zsm_handlers/OpticalZSMPlugin.py new file mode 100644 index 000000000..6baacfde6 --- /dev/null +++ b/src/automation/service/zsm_handlers/OpticalZSMPlugin.py @@ -0,0 +1,104 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc, logging +from uuid import uuid4 +from common.proto.analytics_frontend_pb2 import AnalyzerId +from common.proto.policy_pb2 import PolicyRuleState +from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService + +from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient +from automation.client.PolicyClient import PolicyClient +from context.client.ContextClient import ContextClient +from automation.service.zsm_handler_api._ZSMHandler import _ZSMHandler + + +LOGGER = logging.getLogger(__name__) + + +class OpticalZSMPlugin(_ZSMHandler): + def __init__(self): + LOGGER.info('Init OpticalZSMPlugin') + + def zsmCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext): # type: ignore + context_client = ContextClient() + policy_client = PolicyClient() + analytics_frontend_client = AnalyticsFrontendClient() + + # Verify the input target service ID + try: + target_service_id = context_client.GetService(request.target_service_id) + except grpc.RpcError as ex: + LOGGER.exception(f'Unable to get target service:\n{str(target_service_id)}') + if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + context_client.close() + return self._zsm_create_response_empty() + + # Verify the input telemetry service ID + try: + telemetry_service_id = context_client.GetService(request.telemetry_service_id) + except grpc.RpcError as ex: + LOGGER.exception(f'Unable to get telemetry service:\n{str(telemetry_service_id)}') + if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + 
context_client.close() + return self._zsm_create_response_empty() + + # Start an analyzer + try: + analyzer_id: AnalyzerId = analytics_frontend_client.StartAnalyzer(request.analyzer) # type: ignore + LOGGER.info('Analyzer_id:\n{:s}'.format(str(analyzer_id))) + except grpc.RpcError as ex: + LOGGER.exception(f'Unable to start Analyzer:\n{str(request.analyzer)}') + if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + context_client.close() + analytics_frontend_client.close() + return self._zsm_create_response_empty() + + # Create a policy + try: + LOGGER.info(f'Policy:\n{str(request.policy)}') + policy_rule_state: PolicyRuleState = policy_client.PolicyAddService(request.policy) # type: ignore + LOGGER.info(f'Policy rule state:\n{policy_rule_state}') + except Exception as ex: + LOGGER.exception(f'Unable to create policy:\n{str(request.policy)}') + LOGGER.exception(ex.code()) + # ToDo: Investigate why PolicyAddService throws exception + # if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + # context_client.close() + # policy_client.close() + # return self._zsm_create_response_empty() + + context_client.close() + analytics_frontend_client.close() + policy_client.close() + return self._zsm_create_response(request) + + def zsmDelete(self): + LOGGER.info('zsmDelete method') + + def zsmGetById(self): + LOGGER.info('zsmGetById method') + + def zsmGetByService(self): + LOGGER.info('zsmGetByService method') + + def _zsm_create_response(self, request): + response = ZSMService() + automation_id = str(uuid4()) + response.zsmServiceId.uuid.uuid = automation_id + response.serviceId.service_uuid.uuid = request.target_service_id.service_uuid.uuid + return response + + def _zsm_create_response_empty(self): + return ZSMService() diff --git a/src/automation/service/zsm_handlers/P4INTZSMPlugin.py b/src/automation/service/zsm_handlers/P4INTZSMPlugin.py index f3ea519a6..74694a376 100644 --- 
a/src/automation/service/zsm_handlers/P4INTZSMPlugin.py +++ b/src/automation/service/zsm_handlers/P4INTZSMPlugin.py @@ -74,9 +74,9 @@ class P4INTZSMPlugin(_ZSMHandler): LOGGER.exception(ex.code()) # ToDo: Investigate why PolicyAddService throws exception # if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member - # context_client.close() - # policy_client.close() - # return self._zsm_create_response_empty() + context_client.close() + policy_client.close() + return self._zsm_create_response_empty() context_client.close() analytics_frontend_client.close() diff --git a/src/automation/service/zsm_handlers/__init__.py b/src/automation/service/zsm_handlers/__init__.py index abb5588f8..79e703ba8 100644 --- a/src/automation/service/zsm_handlers/__init__.py +++ b/src/automation/service/zsm_handlers/__init__.py @@ -15,9 +15,16 @@ from common.proto.context_pb2 import ServiceTypeEnum from ..zsm_handler_api.ZSMFilterFields import ZSMFilterFieldEnum from automation.service.zsm_handlers.P4INTZSMPlugin import P4INTZSMPlugin +from automation.service.zsm_handlers.OpticalZSMPlugin import OpticalZSMPlugin ZSM_SERVICE_HANDLERS = [ (P4INTZSMPlugin, [ + { + ZSMFilterFieldEnum.TARGET_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, + ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT, + } + ]), + (OpticalZSMPlugin, [ { ZSMFilterFieldEnum.TARGET_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, -- GitLab From d373581daf283f3ace29a27a7887ed7f7c7b9e16 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 14:54:59 +0000 Subject: [PATCH 35/41] Pre-merge code cleanup --- manifests/kafka/single-node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index ead717849..6eb9bd03c 100644 --- a/manifests/kafka/single-node.yaml +++ 
b/manifests/kafka/single-node.yaml @@ -83,7 +83,7 @@ spec: - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP value: "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT" - name: KAFKA_CFG_ADVERTISED_LISTENERS - value: "PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9094" # local clients use kubectl port-forward ... 9092:external + value: "PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092,EXTERNAL://localhost:9094" - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES value: "CONTROLLER" - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS -- GitLab From 89d60b34f14190a71622bfadf68e2cf3c6e4a46e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 15:03:12 +0000 Subject: [PATCH 36/41] Pre-merge code cleanup --- .../service/ServiceServiceServicerImpl.py | 31 ++----------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 03bd27fc3..c71123573 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -343,9 +343,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): reply_txt = add_lightpath(src, dst, bitrate, bidir) else: reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, preferred, ob_band, dj_optical_band_id) - #logging.info(f"TEEEEEEEEEEEEEEST {oc_type}") - #logging.info(f"POLIMI {reply_txt}") - if reply_txt == None: + if reply_txt is None: return service_with_uuids.service_id reply_json = json.loads(reply_txt) LOGGER.debug('[optical] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) @@ -596,11 +594,9 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): updated_service_with_uuids = get_service_by_id( context_client, updated_service_id_with_uuids, rw_copy=True, include_config_rules=True, include_constraints=True, include_endpoint_ids=True) - LOGGER.info('WYY:{}'.format(updated_service_with_uuids)) # Get 
active connection connections = context_client.ListConnections(updated_service_id_with_uuids) - LOGGER.info('WWWW:{}'.format(connections)) if len(connections.connections) == 0: MSG = 'Service({:s}) has no connections' str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) @@ -624,7 +620,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): raise NotImplementedException('service-connection-with-subservices', extra_details=str_extra_details) if updated_service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: - LOGGER.info('WWWW:{}'.format("is optical")) context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) topology_id_x = json_topology_id( DEFAULT_TOPOLOGY_NAME, context_id_x) @@ -632,8 +627,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): TopologyId(**topology_id_x)) str_old_connection = connection_to_string(old_connection) - LOGGER.info('WWW old_connection={}'.format(grpc_message_to_json_string(old_connection))) - LOGGER.info('WWW0={}'.format(updated_service_with_uuids.service_config.config_rules)) if len(updated_service_with_uuids.service_config.config_rules)> 0: #if len(updated_service.service_config.config_rules) > 0: c_rules_dict = json.loads( @@ -641,7 +634,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): #c_rules_dict = json.loads( # updated_service.service_config.config_rules[0].custom.resource_value) - LOGGER.info('WWW1:{}'.format(c_rules_dict)) flow_id=None #if "ob_id" in c_rules_dict: # ob_id = c_rules_dict["ob_id"] @@ -668,15 +660,12 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): new_connection = optical_reply.connections[0] #for candidate_new_connection in pathcomp_reply.connections: str_candidate_new_connection = connection_to_string(new_connection) - LOGGER.info('QQQQ_old:{}'.format(str_old_connection)) - LOGGER.info('QQQQ_new:{}'.format(str_candidate_new_connection)) # Change UUID of new connection to prevent collisions tmp_connection = Connection() 
tmp_connection.CopyFrom(new_connection) tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) new_connection = tmp_connection service_new = optical_reply.services[0] - LOGGER.info('QQQQ:{}'.format(service_new)) # Feed TaskScheduler with the service to update, the old connection to # deconfigure and the new connection to configure. It will produce a # schedule of tasks (an ordered list of tasks to be executed) to @@ -689,7 +678,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): tasks_scheduler.execute_all() else: if ("ob_id" in c_rules_dict) and ("low-freq" in c_rules_dict): - LOGGER.info('PDP: it is an optical band') ob_id = c_rules_dict["ob_id"] band_txt = get_optical_band(ob_id) optical_band = json.loads(band_txt) @@ -697,14 +685,12 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): optical_band = None obs = context_client.GetOpticalBand() for obz in obs: - LOGGER.info(f"PDP: {obz.opticalband_id}") if obz.opticalband_id == ob_id: optical_band = obz if optical_band is not None: ''' #optical_band = context_client.SelectOpticalBand(ob_id) served_flows = optical_band.get('served_lightpaths') - LOGGER.info(f'PDP: served flows {served_flows}') #context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) #response = context_client.ListServices(context_id_x) @@ -720,7 +706,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ) if ("flow_id" in c_rules_dict): flow_id = c_rules_dict["flow_id"] - LOGGER.info(f"PDP checking {flow_id} and {served_flows}") + LOGGER.info(f"Checking {flow_id} and {served_flows}") if flow_id in served_flows: ########## updated_service : Optional[Service] = get_service_by_id( @@ -733,27 +719,17 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): updated_service_with_uuids = get_service_by_id( context_client, updated_service_id_with_uuids, rw_copy=True, include_config_rules=True, include_constraints=True, 
include_endpoint_ids=True) - LOGGER.info('WYY:{}'.format(updated_service_with_uuids)) # Get active connection connections = context_client.ListConnections(updated_service_id_with_uuids) - LOGGER.info('WWWW:{}'.format(connections)) old_connection = connections.connections[0] - ''' - for service_idc in service_ids: - service_d = context_client.GetService(service_idc) - c_rules_dict = json.loads( - service_d.service_config.config_rules[0].custom.resource_value) - ''' - LOGGER.info('PDP:{}'.format(c_rules_dict)) flow_id = c_rules_dict["flow_id"] reply_txt = "" # to get the reply form the optical module #multi-granular reply_txt = reconfig_flex_lightpath(flow_id) reply_json = json.loads(reply_txt) - LOGGER.info('[PDP] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) devices = topology_details.devices context_uuid_x = topology_details.topology_id.context_id.context_uuid.uuid topology_uuid_x = topology_details.topology_id.topology_uuid.uuid @@ -768,15 +744,12 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): new_connection = optical_reply.connections[0] #for candidate_new_connection in pathcomp_reply.connections: str_candidate_new_connection = connection_to_string(new_connection) - LOGGER.info('QQQQ_old:{}'.format(str_old_connection)) - LOGGER.info('QQQQ_new:{}'.format(str_candidate_new_connection)) # Change UUID of new connection to prevent collisions tmp_connection = Connection() tmp_connection.CopyFrom(new_connection) tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) new_connection = tmp_connection service_new = optical_reply.services[0] - LOGGER.info('QQQQ:{}'.format(service_new)) # Feed TaskScheduler with the service to update, the old connection to -- GitLab From e4e1401d4cd7dd2fa531456cad607aa847157de8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 15:05:39 +0000 Subject: [PATCH 37/41] Pre-merge code cleanup --- src/service/service/monitoring.py | 148 ------------------------------ 1 file 
changed, 148 deletions(-) delete mode 100644 src/service/service/monitoring.py diff --git a/src/service/service/monitoring.py b/src/service/service/monitoring.py deleted file mode 100644 index dad72f00a..000000000 --- a/src/service/service/monitoring.py +++ /dev/null @@ -1,148 +0,0 @@ -import uuid -from common.proto import kpi_manager_pb2 -from common.proto.kpi_sample_types_pb2 import KpiSampleType -from kpi_manager.client.KpiManagerClient import KpiManagerClient -import logging -import pytest -from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor, KpiDescriptorFilter, KpiDescriptorList - -import uuid -from common.proto import kpi_manager_pb2 -from common.proto.kpi_sample_types_pb2 import KpiSampleType -from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI - -from telemetry.backend.service.collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector - - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - - -@pytest.fixture(scope='session') -def kpi_manager_client(): - LOGGER.info('Starting KpiManagerClient...') - _client = KpiManagerClient(host="10.152.183.91") - _client.connect() - LOGGER.info('Yielding Connected KpiManagerClient...') - yield _client - LOGGER.info('Closed KpiManagerClient...') - _client.close() - - - -def create_kpi_descriptor_request(descriptor_name: str = "optical_monitoring"): - _create_kpi_request = kpi_manager_pb2.KpiDescriptor() - #_create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) - _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" - # _create_kpi_request.kpi_id.kpi_id.uuid = "f974b6cc-095f-4767-b8c1-3457b383fb99" - _create_kpi_request.kpi_description = descriptor_name - _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT - #_create_kpi_request.device_id.device_uuid.uuid = str(uuid.uuid4()) - _create_kpi_request.device_id.device_uuid.uuid = "5dc3f5d7-d3a9-5057-a9a0-8af943a5461c" - 
_create_kpi_request.service_id.service_uuid.uuid = 'SERV2' - _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' - #_create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) - _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "decb9c95-7298-5ec8-a4b6-7f276f595106" - _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' - _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' - return _create_kpi_request - - -''' -def test_SetKpiDescriptor(kpi_manager_client): - LOGGER.info(" >>> test_SetKpiDescriptor: START <<< ") - response = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) - LOGGER.info("Response gRPC message object: {:}".format(response)) - assert isinstance(response, KpiId) -''' - -''' -def test_GetKpiDescriptor(kpi_manager_client): - LOGGER.info(" >>> test_GetKpiDescriptor: START <<< ") - # adding KPI - response_id = kpi_manager_client.SetKpiDescriptor(create_kpi_descriptor_request()) - # get KPI - response = kpi_manager_client.GetKpiDescriptor(response_id) - LOGGER.info("Response gRPC message object: {:}".format(response)) - assert isinstance(response, KpiDescriptor) -''' - -# Test device connection parameters -devices = { - 'device1': { - 'host': '172.17.254.22', - 'port': '50061', - 'username': 'admin', - 'password': 'admin', - 'insecure': True, - } -} - -def create_basic_sub_request_parameters( - resource: str = 'components', - endpoint: str = 'port-1-in', # 'Ethernet1', - kpi: KPI = KPI.KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT, # It should be KPI Id not name? Need to be replaced with KPI id. 
-) -> dict: - - device = devices['device1'] - return { - 'target' : (device['host'], device['port']), - 'username' : device['username'], - 'password' : device['password'], - 'connect_timeout' : 15, - 'insecure' : device['insecure'], - 'mode' : 'sample', # Subscription internal mode posibly: on_change, poll, sample - 'sample_interval_ns': '3s', - 'sample_interval' : '10s', - 'kpi' : kpi, - 'resource' : resource, - 'endpoint' : endpoint, - } - - -@pytest.fixture -def sub_parameters(): - """Fixture to provide subscription parameters.""" - return create_basic_sub_request_parameters() - - -@pytest.fixture -def collector(sub_parameters): - """Fixture to create and connect GNMI collector.""" - collector = GNMIOpenConfigCollector( - username = sub_parameters['username'], - password = sub_parameters['password'], - insecure = sub_parameters['insecure'], - address = sub_parameters['target'][0], - port = sub_parameters['target'][1], - ) - collector.Connect() - yield collector - collector.Disconnect() - - -@pytest.fixture -def subscription_data(sub_parameters): - """Fixture to provide subscription data.""" - # It should return a list of tuples with subscription parameters. 
- return [ - ( - "sub_id_123", - { - "kpi" : sub_parameters['kpi'], - "endpoint" : sub_parameters['endpoint'], - "resource" : sub_parameters['resource'], - }, - float(10.0), - float(5.0), - ), - ] - - -def test_collector_connection(collector): - """Test collector connection.""" - LOGGER.info("----- Testing GNMI OpenConfig Collector Connection -----") - assert collector.connected is True - LOGGER.debug("Collector connected: %s", collector.connected) - - -- GitLab From 04c890364ae3c57ef12f00f8f455332a8bad055f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 15:13:01 +0000 Subject: [PATCH 38/41] Pre-merge code cleanup --- src/tests/ofc24/deploy-node-agents.sh | 29 +++++++++++++++------------ 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/src/tests/ofc24/deploy-node-agents.sh b/src/tests/ofc24/deploy-node-agents.sh index 5a809a140..18b1603a3 100755 --- a/src/tests/ofc24/deploy-node-agents.sh +++ b/src/tests/ofc24/deploy-node-agents.sh @@ -34,19 +34,22 @@ echo echo "Create Management Network and Node Agents:" echo "------------------------------------------" docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 --ip-range=172.254.253.0/24 na-br -docker run -d --name na-t1 --network=na-br --ip 172.254.253.1 \ - --volume "$PWD/src/tests/${TEST_NAME}/platform_t1.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-hhi.img:latest ./startNetconfAgent.sh -docker run -d --name na-t2 --network=na-br --ip 172.254.253.2 \ - --volume "$PWD/src/tests/${TEST_NAME}/platform_t2.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-hhi.img:latest ./startNetconfAgent.sh -docker run -d --name na-r1 --network=na-br --ip 172.254.253.101 \ - --volume "$PWD/src/tests/${TEST_NAME}/platform_r1.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-node.img:latest ./startNetconfAgent.sh - -docker run -d --name na-r2 --network=na-br --ip 
172.254.253.102 \ - --volume "$PWD/src/tests/${TEST_NAME}/platform_r2.xml:/confd/examples.confd/OC23/init_openconfig-platform.xml" \ - asgamb1/flexscale-node.img:latest ./startNetconfAgent.sh +docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 --publish 2022 \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t1.xml:/confd/examples.confd/OC23/platform.xml" \ + asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh +docker run -dit --init --name na-t2 --network=na-br --ip 172.254.253.102 --publish 2022 \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t2.xml:/confd/examples.confd/OC23/platform.xml" \ + asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh +docker run -dit --init --name na-r1 --network=na-br --ip 172.254.253.201 --publish 2022 \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r1.xml:/confd/examples.confd/OC23/platform.xml" \ + asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh +docker run -dit --init --name na-r2 --network=na-br --ip 172.254.253.202 --publish 2022 \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ + --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r2.xml:/confd/examples.confd/OC23/platform.xml" \ + asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh echo -- GitLab From 8cf947404df4fb452663c057116462d11097b691 Mon Sep 17 00:00:00 2001 From: gifrerenom 
Date: Mon, 30 Mar 2026 15:15:35 +0000 Subject: [PATCH 39/41] Pre-merge code cleanup --- src/tests/ofc26_flexscale/mock_tfs_services.py | 14 +++++++++++++- src/tests/ofc26_flexscale/test_ofc26_messages.py | 13 +++++++++++++ .../test_ofc26_mgon_integration_V2.py | 13 +++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/src/tests/ofc26_flexscale/mock_tfs_services.py b/src/tests/ofc26_flexscale/mock_tfs_services.py index 6cbacd610..6145e60f5 100644 --- a/src/tests/ofc26_flexscale/mock_tfs_services.py +++ b/src/tests/ofc26_flexscale/mock_tfs_services.py @@ -1,4 +1,16 @@ - +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import os, pytest diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py b/src/tests/ofc26_flexscale/test_ofc26_messages.py index b29bb949e..87549a300 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_messages.py +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -1,3 +1,16 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import json diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py index ec3a57ee0..49ac18b0c 100644 --- a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -1,3 +1,16 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import logging -- GitLab From 0ac3bc0add39d9555191ff0665dd0cc470adc897 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 30 Mar 2026 15:50:54 +0000 Subject: [PATCH 40/41] Analytics component: - Fixed timeout to something reasonable --- src/analytics/frontend/tests/test_frontend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py index bc210889a..6532d6ee3 100644 --- a/src/analytics/frontend/tests/test_frontend.py +++ b/src/analytics/frontend/tests/test_frontend.py @@ -138,7 +138,7 @@ def test_StartAnalyzer_MGON_Agent(analyticsFrontend_client): added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer_for_mgon_agent()) LOGGER.debug(str(added_analyzer_id)) LOGGER.info("waiting for timer to complete 3000 seconds ...") - time.sleep(3000) + time.sleep(60) LOGGER.info('--> StopAnalyzer after timer completion') response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id) LOGGER.debug(str(response)) -- GitLab From 440907c331fbf6ed9e1def50e502cb72039c99dc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 31 Mar 2026 15:16:13 +0000 Subject: [PATCH 41/41] Service component: - Fix syntax warning --- src/service/service/ServiceServiceServicerImpl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index c71123573..272174d99 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -655,7 +655,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): device_uuid = device.device_id.device_uuid.uuid device_names[device_uuid] = device.name - if reply_txt is not "": + if reply_txt != '': optical_reply = adapt_reply(devices, updated_service, reply_json, context_uuid_x, topology_uuid_x, "") new_connection = optical_reply.connections[0] #for 
candidate_new_connection in pathcomp_reply.connections: @@ -739,7 +739,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): device_uuid = device.device_id.device_uuid.uuid device_names[device_uuid] = device.name - if reply_txt is not "": + if reply_txt != '': optical_reply = adapt_reply(devices, updated_service, reply_json, context_uuid_x, topology_uuid_x, "") new_connection = optical_reply.connections[0] #for candidate_new_connection in pathcomp_reply.connections: -- GitLab