Compare revisions — tfs/controller
Commits on Source (9) — showing with 933 additions and 52 deletions
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, random, uuid
from typing import Dict, Tuple
from compute.service.rest_server.nbi_plugins.ietf_network_slice.bindings.network_slice_services import (
    NetworkSliceServices
)

# R1 emulated devices
# Port 13-0 is Optical
# Port 13-1 is Copper
R1_UUID = "ed2388eb-5fb9-5888-a4f4-160267d3e19b"
R1_PORT_13_0_UUID_OPTICAL = "20440915-1a6c-5e7b-a80f-b0e0e51f066d"
R1_PORT_13_1_UUID_COPPER = "ff900d5d-2ac0-576c-9628-a2d016681f9d"

# R2 emulated devices
# Port 13-0 is Optical
# Port 13-1 is Copper
R2_UUID = "49ce0312-1274-523b-97b8-24d0eca2d72d"
R2_PORT_13_0_UUID_OPTICAL = "214618cb-b63b-5e66-84c2-45c1c016e5f0"
R2_PORT_13_1_UUID_COPPER = "4e0f7fb4-5d22-56ad-a00e-20bffb4860f9"

# R3 emulated devices
# Port 13-0 is Optical
# Port 13-1 is Copper
R3_UUID = "3bc8e994-a3b9-5f60-9c77-6608b1d08313"
R3_PORT_13_0_UUID_OPTICAL = "da5196f5-d651-5def-ada6-50ed6430279d"
R3_PORT_13_1_UUID_COPPER = "43d221fa-5701-5740-a129-502131f5bda2"

# R4 emulated devices
# Port 13-0 is Optical
# Port 13-1 is Copper
R4_UUID = "b43e6361-2573-509d-9a88-1793e751b10d"
R4_PORT_13_0_UUID_OPTICAL = "241b74a7-8677-595c-ad65-cc9093c1e341"
R4_PORT_13_1_UUID_COPPER = "c57abf46-caaf-5954-90cc-1fec0a69330e"

node_dict = {R1_PORT_13_1_UUID_COPPER: R1_UUID,
             R2_PORT_13_1_UUID_COPPER: R2_UUID,
             R3_PORT_13_1_UUID_COPPER: R3_UUID,
             R4_PORT_13_1_UUID_COPPER: R4_UUID}
list_endpoints = [R1_PORT_13_1_UUID_COPPER,
                  R2_PORT_13_1_UUID_COPPER,
                  R3_PORT_13_1_UUID_COPPER,
                  R4_PORT_13_1_UUID_COPPER]

list_availability = [99, 99.9, 99.99, 99.999, 99.9999]
list_bw = [10, 40, 50, 100, 150, 200, 400]
list_owner = ["Telefonica", "CTTC", "Telenor", "ADVA", "Ubitech", "ATOS"]

URL_POST = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services"
URL_DELETE = "/restconf/data/ietf-network-slice-service:ietf-nss/network-slice-services/slice-service="

def generate_request(seed: str) -> Tuple[Dict, str]:
    ns = NetworkSliceServices()

    # Slice 1
    suuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(seed)))
    slice1 = ns.slice_service[suuid]
    slice1.service_description = "Test slice for OFC 2023 demo"
    slice1.status().admin_status().status = "Planned"  # TODO not yet mapped

    # SDPs: pick two distinct random copper endpoints
    sdps1 = slice1.sdps().sdp
    while True:
        ep1_uuid = random.choice(list_endpoints)
        ep2_uuid = random.choice(list_endpoints)
        if ep1_uuid != ep2_uuid:
            break
    sdps1[ep1_uuid].node_id = node_dict.get(ep1_uuid)
    sdps1[ep2_uuid].node_id = node_dict.get(ep2_uuid)

    # Connectivity group: connection construct and 2 SLA constraints:
    # - Bandwidth
    # - Availability
    cg_uuid = str(uuid.uuid4())
    cg = slice1.connection_groups().connection_group
    cg1 = cg[cg_uuid]
    cc1 = cg1.connectivity_construct[0]
    cc1.cc_id = 5
    p2p = cc1.connectivity_construct_type.p2p()
    p2p.p2p_sender_sdp = ep1_uuid
    p2p.p2p_receiver_sdp = ep2_uuid
    slo_custom = cc1.slo_sle_policy.custom()
    metric_bounds = slo_custom.service_slo_sle_policy().metric_bounds().metric_bound

    # SLO Bandwidth
    slo_bandwidth = metric_bounds["service-slo-two-way-bandwidth"]
    slo_bandwidth.value_description = "Guaranteed bandwidth"
    slo_bandwidth.bound = int(random.choice(list_bw))
    slo_bandwidth.metric_unit = "Gbps"

    # SLO Availability
    slo_availability = metric_bounds["service-slo-availability"]
    slo_availability.value_description = "Guaranteed availability"
    slo_availability.metric_unit = "percentage"
    slo_availability.bound = random.choice(list_availability)

    json_request = {"data": ns.to_json()}

    # Last, add the owner service-tag manually
    list_name_owner = [{"tag-type": "owner", "value": random.choice(list_owner)}]
    json_request["data"]["ietf-network-slice-service:network-slice-services"]["slice-service"][0]["service-tags"] = list_name_owner
    return (json_request, suuid)

if __name__ == "__main__":
    request = generate_request(123)
    print(json.dumps(request[0], sort_keys=True, indent=4))
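For context, a minimal sketch of how such a generated request could be exercised against the NBI endpoints defined above; the base URL, the payload shape, and the module name `slice_request_generator` are assumptions, not part of this changeset:

```(python)
# Hedged sketch: BASE_URL, the payload shape, and the module name are
# assumptions; adapt them to your deployment.
import requests
from slice_request_generator import URL_DELETE, URL_POST, generate_request  # hypothetical module name

BASE_URL = 'http://127.0.0.1:80'  # assumed NBI address

json_request, slice_uuid = generate_request('test-seed')
response = requests.post(BASE_URL + URL_POST, json=json_request['data'], timeout=10)
response.raise_for_status()

# The uuid5-derived slice id is deterministic for a given seed, so the same
# identifier can later be used to tear the slice down:
response = requests.delete(BASE_URL + URL_DELETE + slice_uuid, timeout=10)
response.raise_for_status()
```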
@@ -39,14 +39,6 @@ ROUTER_ID = {
     'R149': '5.5.5.5',
     'R155': '5.5.5.1',
     'R199': '5.5.5.6',
 }
-VIRTUAL_CIRCUIT = {
-    'R149': '5.5.5.5',
-    'R155': '5.5.5.1',
-    'R199': '5.5.5.6',
-}
 class RequestGenerator:
@@ -269,8 +261,8 @@ class RequestGenerator:
         src_device_name = self._device_data[src_device_uuid]['name']
         src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-        src_router_id = ROUTER_ID.get(src_device_name)
         src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
+        src_router_id = ROUTER_ID.get(src_device_name)
         if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
         dst_device_name = self._device_data[dst_device_uuid]['name']
@@ -322,8 +314,8 @@ class RequestGenerator:
         src_device_name = self._device_data[src_device_uuid]['name']
         src_endpoint_name = self._device_endpoint_data[src_device_uuid][src_endpoint_uuid]['name']
-        src_router_id = ROUTER_ID.get(src_device_name)
         src_router_num = int(re.findall(r'^\D*(\d+)', src_device_name)[0])
+        src_router_id = ROUTER_ID.get(src_device_name)
         if src_router_id is None: src_router_id = '10.0.0.{:d}'.format(src_router_num)
         src_address_ip = '10.{:d}.{:d}.{:d}'.format(x, y, src_router_num)
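Both hunks apply the same pattern: extract the numeric suffix of the device name first, then fall back to a synthetic 10.0.0.x router-id when no static mapping exists. A standalone sketch of that logic (device names illustrative):

```(python)
# Standalone sketch of the router-id fallback used above.
import re

ROUTER_ID = {'R149': '5.5.5.5', 'R155': '5.5.5.1', 'R199': '5.5.5.6'}

def resolve_router_id(device_name : str) -> str:
    # First number embedded in the device name, e.g. 'R23' -> 23
    router_num = int(re.findall(r'^\D*(\d+)', device_name)[0])
    router_id = ROUTER_ID.get(device_name)
    # Devices without a statically assigned router-id get 10.0.0.<num>
    if router_id is None: router_id = '10.0.0.{:d}'.format(router_num)
    return router_id

assert resolve_router_id('R149') == '5.5.5.5'
assert resolve_router_id('R23') == '10.0.0.23'
```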
......
backend/wireshark
backend/*.o
backend/pathComp
backend/pathComp-dbg
@@ -144,7 +144,7 @@ void computation_shortest_path(struct service_t* s, struct compRouteOutput_t* pa
     DEBUG_PC("Computed Path Avail Bw: %f, Path Cost: %f, latency: %f", p->availCap, p->cost, p->delay);
     print_path(p);
-    gboolean feasibleRoute = check_computed_path_feasability(s, p);
+    gboolean feasibleRoute = check_computed_path_feasibility(s, p);
     if (feasibleRoute == TRUE) {
         DEBUG_PC("SP Feasible");
         print_path(p);
......
@@ -1460,8 +1460,7 @@ void modify_targeted_graph (struct graph_t *g, struct path_set_t *A, struct comp
  * @date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gint find_nodeId (gconstpointer data, gconstpointer userdata)
-{
+gint find_nodeId (gconstpointer data, gconstpointer userdata) {
     /** check values */
     g_assert(data != NULL);
     g_assert(userdata != NULL);
@@ -1471,8 +1470,7 @@ gint find_nodeId (gconstpointer data, gconstpointer userdata)
     //DEBUG_PC ("SNodeId (%s) nodeId (%s)", SNodeId->node.nodeId, nodeId);
-    if (!memcmp(SNodeId->node.nodeId, nodeId, strlen (SNodeId->node.nodeId)))
-    {
+    if (!memcmp(SNodeId->node.nodeId, nodeId, strlen (SNodeId->node.nodeId))) {
         return (0);
     }
     return -1;
@@ -1501,13 +1499,13 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     g_assert(g); g_assert(s); g_assert(mapNodes);
     struct targetNodes_t *v = &(g->vertices[indexGraphU].targetedVertices[indexGraphV]);
-    DEBUG_PC("Explored Link %s => %s)", u->node.nodeId, v->tVertice.nodeId);
+    DEBUG_PC("=======================CHECK Edge %s => %s =================================", u->node.nodeId, v->tVertice.nodeId);
     //DEBUG_PC("\t %s => %s", u->node.nodeId, v->tVertice.nodeId);
     // v already explored in S? then, discard it
     GList *found = g_list_find_custom (*S, v->tVertice.nodeId, find_nodeId);
     if (found != NULL) {
-        DEBUG_PC ("v (%s) in S, Discard", v->tVertice.nodeId);
+        DEBUG_PC ("%s in S, DISCARD", v->tVertice.nodeId);
         return 0;
     }
@@ -1524,10 +1522,11 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
         DEBUG_PC("EDGE %s[%s] => %s[%s]", u->node.nodeId, e->aEndPointId, v->tVertice.nodeId, e->zEndPointId);
         //DEBUG_PC ("\t %s[%s] =>", u->node.nodeId, e->aEndPointId);
         //DEBUG_PC("\t => %s[%s]", v->tVertice.nodeId, e->zEndPointId);
-        DEBUG_PC("\t AvailBw: %f, TotalBw: %f", edgeAvailBw, edgeTotalBw);
+        DEBUG_PC("\t Edge Att: AvailBw: %f, TotalBw: %f", edgeAvailBw, edgeTotalBw);
         // Check Service Bw constraint
-        if ((path_constraints->bw == TRUE) && (edgeAvailBw < path_constraints->bwConstraint))
+        if ((path_constraints->bw == TRUE) && (edgeAvailBw < path_constraints->bwConstraint)) {
             continue;
+        }
         else {
             foundAvailBw = 1;
             break;
@@ -1535,7 +1534,7 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     }
     // BW constraint NOT MET, then DISCARD edge
     if ((path_constraints->bw == TRUE) && (foundAvailBw == 0)) {
-        DEBUG_PC ("AvailBw: %f < path_constraint: %f -- Discard Edge", edgeAvailBw, path_constraints->bwConstraint);
+        DEBUG_PC ("Edge AvailBw: %f < path_constraint: %f -- DISCARD Edge", edgeAvailBw, path_constraints->bwConstraint);
         g_free(path_constraints);
         return 0;
     }
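The renamed messages trace the edge-pruning rule in check_link: among the parallel edges between u and v, the first one with enough available bandwidth is selected; if none qualifies, the whole adjacency is discarded. A hedged Python restatement (field names simplified from the C structs):

```(python)
# Hedged restatement of the bandwidth check in check_link.
from typing import Dict, List, Optional

def first_feasible_edge(edges : List[Dict], bw_constraint : Optional[float]) -> Optional[Dict]:
    # Return the first parallel edge u->v whose available bandwidth satisfies
    # the service constraint, or None if the adjacency must be discarded.
    for edge in edges:
        if (bw_constraint is not None) and (edge['availBw'] < bw_constraint):
            continue  # this parallel edge cannot host the service
        return edge
    return None  # BW constraint NOT MET on every parallel edge -> discard

edges = [{'id': 'e1', 'availBw': 40.0}, {'id': 'e2', 'availBw': 120.0}]
assert first_feasible_edge(edges, 100.0)['id'] == 'e2'
assert first_feasible_edge(edges, 200.0) is None
```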
@@ -1581,13 +1580,14 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     if (arg & ENERGY_EFFICIENT_ARGUMENT) {
         if (distance_through_u == v_map->distance) {
             if (power_through_u > v_map->power) {
-                DEBUG_PC("Energy (src -> u + u -> v: %f (Watts) >Energy (src, v): %f (Watts)--> DISCARD LINK", power_through_u, v_map->power);
+                DEBUG_PC("Energy (src -> u + u -> v: %f (Watts) > Energy (src, v): %f (Watts) --> DISCARD EDGE", power_through_u, v_map->power);
                 return 0;
             }
             // same energy consumption, consider latency
             if ((power_through_u == v_map->power) && (latency_through_u > v_map->latency)) {
                 return 0;
             }
+            // same energy, same latency, criteria: choose the one having the largest available bw
             if ((power_through_u == v_map->power) && (latency_through_u == v_map->latency) && (availBw_through_u < v_map->avaiBandwidth)) {
                 return 0;
             }
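The new comment makes the tie-break order explicit: when costs are equal, the energy-efficient mode prefers lower power, then lower latency, and finally the larger available bandwidth. A compact sketch of that lexicographic rule (tuples invented for illustration):

```(python)
# Hedged sketch of the energy-efficient tie-break in check_link.
def is_worse(candidate, best) -> bool:
    cand_cost, cand_power, cand_latency, cand_bw = candidate
    best_cost, best_power, best_latency, best_bw = best
    if cand_cost != best_cost: return cand_cost > best_cost
    if cand_power != best_power: return cand_power > best_power          # lower energy wins
    if cand_latency != best_latency: return cand_latency > best_latency  # then lower latency
    return cand_bw < best_bw                                             # then larger available bw

# tuples: (cost, power in Watts, latency in ms, availBw in Mb/s)
assert is_worse((10, 5.0, 2.0, 100), (10, 4.0, 9.9, 10))      # more power -> discard
assert not is_worse((10, 4.0, 2.0, 100), (10, 4.0, 2.0, 50))  # same power/latency, more bw -> relax
```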
@@ -1604,8 +1604,9 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
             return 0;
         }
     }
-    DEBUG_PC ("%s --> %s Relaxed", u->node.nodeId, v->tVertice.nodeId);
-    DEBUG_PC ("\t AvailBw: %f Mb/s, Cost: %f, Latency: %f ms, Energy: %f Watts", availBw_through_u, distance_through_u, latency_through_u, power_through_u);
+    DEBUG_PC ("Edge %s --> %s [RELAXED]", u->node.nodeId, v->tVertice.nodeId);
+    DEBUG_PC ("\t path till %s: AvailBw: %f Mb/s | Cost: %f | Latency: %f ms | Energy: %f Watts", v->tVertice.nodeId, availBw_through_u, distance_through_u,
+        latency_through_u, power_through_u);
     // Update Q list --
     struct nodeItem_t *nodeItem = g_malloc0 (sizeof (struct nodeItem_t));
@@ -1622,8 +1623,9 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     if (arg & ENERGY_EFFICIENT_ARGUMENT) {
         *Q = g_list_insert_sorted(*Q, nodeItem, sort_by_energy);
     }
-    else
+    else {
         *Q = g_list_insert_sorted (*Q, nodeItem, sort_by_distance);
+    }
     // Update the mapNodes for the specific reached tv
     v_map->distance = distance_through_u;
@@ -1635,9 +1637,9 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
     struct edges_t *e1 = &(v_map->predecessor);
     struct edges_t *e2 = &(v->edges[indexEdge]);
     duplicate_edge(e1, e2);
-    DEBUG_PC ("u->v Edge: %s(%s) --> %s(%s)", e2->aNodeId.nodeId, e2->aEndPointId, e2->zNodeId.nodeId, e2->zEndPointId);
+    //DEBUG_PC ("u->v Edge: %s(%s) --> %s(%s)", e2->aNodeId.nodeId, e2->aEndPointId, e2->zNodeId.nodeId, e2->zEndPointId);
     //DEBUG_PC("v-pred aTopology: %s", e2->aTopologyId);
-    DEBUG_PC("v-pred zTopology: %s", e2->zTopologyId);
+    //DEBUG_PC("v-pred zTopology: %s", e2->zTopologyId);
     // Check whether v is dstPEId
     //DEBUG_PC ("Targeted dstId: %s", s->service_endpoints_id[1].device_uuid);
@@ -1659,7 +1661,7 @@ gint check_link (struct nodeItem_t *u, gint indexGraphU, gint indexGraphV, struc
  * @date 2022
  */
 /////////////////////////////////////////////////////////////////////////////////////////
-gboolean check_computed_path_feasability (struct service_t *s, struct compRouteOutputItem_t* p) {
+gboolean check_computed_path_feasibility (struct service_t *s, struct compRouteOutputItem_t* p) {
     float epsilon = 0.0000001;
     struct path_constraints_t* pathCons = get_path_constraints(s);
     gboolean ret = TRUE;
@@ -2346,7 +2348,7 @@ void build_contextSet_linklList(GList** set, gint activeFlag) {
     // for each link in linkList:
     // 1st- Retrieve endpoints A --> B feauture (context Id, device Id, endpoint Id)
     // 2st - In the graph associated to the contextId, check wheter A (deviceId) is in the vertices list
-    //    o No, this is weird ... exist
+    //    o No, this is weird ... exit
     //    o Yes, get the other link endpoint (i.e., B) and check whether it exists. If NOT add it, considering
     //    all the attributes; Otherwise, check whether the link is different from existing edges between A and B
     gdouble epsilon = 0.1;
@@ -3065,7 +3067,7 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv
         // if ingress of the root link (aNodeId) is the spurNode, then stops
         if (compare_node_id(&re->aNodeId, SN) == 0) {
-            DEBUG_PC("root Link: aNodeId: %s and spurNode: %s -- stop exploring the rootPath (RP)", re->aNodeId.nodeId, SN->nodeId);
+            DEBUG_PC("Ingress Node rootLink %s = spurNode %s; STOP exploring rootPath (RP)", re->aNodeId.nodeId, SN->nodeId);
             break;
         }
         // Extract from Q
@@ -3073,7 +3075,6 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv
         struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
         Q = g_list_remove(Q, node);
-        //DEBUG_RL_RA ("Exploring node %s", node->node.nodeId);
         indexVertice = graph_vertice_lookup(node->node.nodeId, g);
         g_assert(indexVertice >= 0);
@@ -3087,22 +3088,21 @@ void dijkstra(gint srcMapIndex, gint dstMapIndex, struct graph_t* g, struct serv
         }
         // Check that the first node in Q set is SpurNode, otherwise something went wrong ...
         if (compare_node_id(&re->aNodeId, SN) != 0) {
-            //DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId);
+            DEBUG_PC ("root Link: aNodeId: %s is NOT the spurNode: %s -- something wrong", re->aNodeId.nodeId, SN->nodeId);
             g_list_free_full(g_steal_pointer(&S), g_free);
             g_list_free_full(g_steal_pointer(&Q), g_free);
             return;
         }
     }
     while (g_list_length(Q) > 0) {
         //Extract from Q set
         GList* listnode = g_list_first(Q);
         struct nodeItem_t* node = (struct nodeItem_t*)(listnode->data);
         Q = g_list_remove(Q, node);
         DEBUG_PC("Q length: %d", g_list_length(Q));
-        DEBUG_PC("DeviceId: %s", node->node.nodeId);
+        DEBUG_PC("Explored DeviceId: %s", node->node.nodeId);
-        // visit all the links from u within the graph
+        // scan all the links from u within the graph
         indexVertice = graph_vertice_lookup(node->node.nodeId, g);
         g_assert(indexVertice >= 0);
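The loop keeps Q as an ordered list and repeatedly extracts its head, the standard Dijkstra extract-min step. A minimal Python equivalent using a heap (illustrative only, not the pathComp data structures):

```(python)
# Minimal Dijkstra sketch mirroring the Q/S handling above; graph layout
# and node names are invented for the example.
import heapq
from typing import Dict, List, Tuple

def dijkstra(graph : Dict[str, List[Tuple[str, float]]], src : str) -> Dict[str, float]:
    distance = {src: 0.0}
    Q = [(0.0, src)]  # min-heap ordered by distance, like the sorted GList Q
    S = set()         # explored nodes, like the S list
    while Q:
        dist_u, u = heapq.heappop(Q)  # extract from Q set
        if u in S: continue           # u already explored? then discard it
        S.add(u)
        for v, cost in graph.get(u, []):
            alt = dist_u + cost
            if alt < distance.get(v, float('inf')):
                distance[v] = alt           # edge (u, v) relaxed
                heapq.heappush(Q, (alt, v))
    return distance

g = {'R1': [('R2', 1.0), ('R3', 4.0)], 'R2': [('R3', 1.0)]}
assert dijkstra(g, 'R1')['R3'] == 2.0
```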
@@ -3140,18 +3140,19 @@ gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s,
     struct map_nodes_t* mapNodes, guint arg) {
     g_assert(pred); g_assert(g); g_assert(s);
-    DEBUG_PC("Source: %s -- Destination: %s", s->service_endpoints_id[0].device_uuid, s->service_endpoints_id[1].device_uuid);
+    DEBUG_PC("SOURCE: %s --> DESTINATION: %s", s->service_endpoints_id[0].device_uuid,
+        s->service_endpoints_id[1].device_uuid);
     // Check the both ingress src and dst endpoints are in the graph
     gint srcMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[0].device_uuid, mapNodes);
     if (srcMapIndex == -1) {
-        DEBUG_PC("ingress DeviceId: %s NOT in the graph", s->service_endpoints_id[0].device_uuid);
+        DEBUG_PC("ingress DeviceId: %s NOT in G", s->service_endpoints_id[0].device_uuid);
         return -1;
     }
     gint dstMapIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
     if (dstMapIndex == -1) {
-        DEBUG_PC("egress DeviceId: %s NOT in the graph", s->service_endpoints_id[1].device_uuid);
+        DEBUG_PC("egress DeviceId: %s NOT in G", s->service_endpoints_id[1].device_uuid);
         return -1;
     }
@@ -3165,17 +3166,17 @@ gint ksp_comp(struct pred_t* pred, struct graph_t* g, struct service_t* s,
     gint map_dstIndex = get_map_index_by_nodeId(s->service_endpoints_id[1].device_uuid, mapNodes);
     struct map_t* dest_map = &mapNodes->map[map_dstIndex];
     if (!(dest_map->distance < INFINITY_COST)) {
-        DEBUG_PC("destination: %s NOT reachable", s->service_endpoints_id[1].device_uuid);
+        DEBUG_PC("DESTINATION: %s NOT reachable", s->service_endpoints_id[1].device_uuid);
         return -1;
     }
     DEBUG_PC("AvailBw @ %s is %f", dest_map->verticeId.nodeId, dest_map->avaiBandwidth);
     // Check that the computed available bandwidth is larger than 0.0
     if (dest_map->avaiBandwidth <= (gfloat)0.0) {
-        DEBUG_PC("dst: %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid);
+        DEBUG_PC("DESTINATION %s NOT REACHABLE", s->service_endpoints_id[1].device_uuid);
         return -1;
     }
-    DEBUG_PC("dst: %s REACHABLE", s->service_endpoints_id[1].device_uuid);
+    DEBUG_PC("DESTINATION %s REACHABLE", s->service_endpoints_id[1].device_uuid);
     // Handle predecessors
     build_predecessors(pred, s, mapNodes);
     return 1;
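ksp_comp declares the destination usable only when two conditions hold after Dijkstra: the node was actually relaxed (finite cost) and the residual bandwidth along the computed path is strictly positive. Restated compactly:

```(python)
# Hedged restatement of the reachability test in ksp_comp.
INFINITY_COST = 0xFFFFFFFF

def destination_reachable(distance : float, avail_bw : float) -> bool:
    # destination must have a finite computed cost AND positive available bandwidth
    return (distance < INFINITY_COST) and (avail_bw > 0.0)

assert destination_reachable(3.0, 150.0)
assert not destination_reachable(float(INFINITY_COST), 150.0)  # never relaxed
assert not destination_reachable(3.0, 0.0)                     # no residual bandwidth
```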
@@ -3229,6 +3230,9 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_
     struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[0]);
     struct service_endpoints_id_t* eEp = &(s->service_endpoints_id[1]);
+    DEBUG_PC("=======================================================================================");
+    DEBUG_PC("STARTING PATH COMP FOR %s[%s] --> %s[%s]", iEp->device_uuid, iEp->endpoint_uuid, eEp->device_uuid, eEp->endpoint_uuid);
     // Compute the 1st KSP path
     gint done = ksp_comp(predecessors, g, s, NULL, NULL, mapNodes, arg);
     if (done == -1) {
@@ -3360,9 +3364,10 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_
     // copy the service endpoints, in general, there will be 2 (point-to-point network connectivity services)
     for (gint m = 0; m < s->num_service_endpoints_id; m++) {
         struct service_endpoints_id_t* iEp = &(s->service_endpoints_id[m]);
-        struct service_endpoints_id_t* oEp = &(s->service_endpoints_id[m]);
+        struct service_endpoints_id_t* oEp = &(path->service_endpoints_id[m]);
         copy_service_endpoint_id(oEp, iEp);
     }
+    path->num_service_endpoints_id = s->num_service_endpoints_id;
     // Print all the paths i A
     for (gint h = 0; h < A->numPaths; h++) {
@@ -3376,9 +3381,10 @@ void alg_comp(struct service_t* s, struct compRouteOutput_t* path, struct graph_
             DEBUG_PC("Number Requested paths (%d) REACHED - STOP", ksp);
             break;
         }
-        gdouble feasibleRoute = check_computed_path_feasability(s, &A->paths[ksp]);
+        gdouble feasibleRoute = check_computed_path_feasibility(s, &A->paths[ksp]);
         if (feasibleRoute == TRUE) {
-            DEBUG_PC("A[%d] available: %f, pathCost: %f; latency: %f, Power: %f", ksp, A->paths[ksp].availCap, A->paths[ksp].cost, A->paths[ksp].delay, A->paths[ksp].power);
+            DEBUG_PC("A[%d] available: %f, pathCost: %f; latency: %f, Power: %f", ksp, A->paths[ksp].availCap,
+                A->paths[ksp].cost, A->paths[ksp].delay, A->paths[ksp].power);
             struct compRouteOutputItem_t* pathaux = &A->paths[ksp];
             path->numPaths++;
             struct path_t* targetedPath = &path->paths[path->numPaths - 1];
......
@@ -39,7 +39,7 @@ extern GList* activeServList;
 #define INFINITY_COST 0xFFFFFFFF
 #define MAX_NUM_PRED 100
-#define MAX_KSP_VALUE 3
+#define MAX_KSP_VALUE 5
 // HTTP RETURN CODES
 #define HTTP_CODE_OK 200
@@ -576,7 +576,7 @@ gboolean matching_path_rootPath (struct compRouteOutputItem_t *, struct compRout
 void modify_targeted_graph (struct graph_t *, struct path_set_t *, struct compRouteOutputItem_t *, struct nodes_t *);
 gint find_nodeId (gconstpointer, gconstpointer);
 gint check_link (struct nodeItem_t *, gint, gint, struct graph_t *, struct service_t *, GList **, GList **, struct map_nodes_t *, guint arg);
-gboolean check_computed_path_feasability (struct service_t *, struct compRouteOutputItem_t * );
+gboolean check_computed_path_feasibility (struct service_t *, struct compRouteOutputItem_t * );
 gint sort_by_distance (gconstpointer, gconstpointer);
 gint sort_by_energy(gconstpointer, gconstpointer);
......
@@ -33,12 +33,12 @@ DEVICE_TYPE_TO_DEEPNESS = {
     DeviceTypeEnum.EMULATED_P4_SWITCH.value : 60,
     DeviceTypeEnum.P4_SWITCH.value : 60,
-    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 40,
-    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : 40,
     DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value : 40,
     DeviceTypeEnum.XR_CONSTELLATION.value : 40,
+    DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value : 30,
+    DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value : 30,
     DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value : 30,
     DeviceTypeEnum.OPEN_LINE_SYSTEM.value : 30,
......
@@ -12,23 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import json
+import json, logging
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 from common.method_wrappers.ServiceExceptions import NotFoundException
-from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId
+from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceDriverEnum, DeviceId, Service, ServiceId
 from common.tools.context_queries.Connection import get_connection_by_id
 from common.tools.context_queries.Device import get_device
 from common.tools.context_queries.Service import get_service_by_id
+from common.tools.grpc.Tools import grpc_message_list_to_json_string
 from common.tools.object_factory.Device import json_device_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
+from service.service.service_handler_api.Exceptions import (
+    UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException)
 from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class
 from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key

 if TYPE_CHECKING:
     from service.service.service_handler_api._ServiceHandler import _ServiceHandler

+LOGGER = logging.getLogger(__name__)
+
 CacheableObject = Union[Connection, Device, Service]

 class CacheableObjectType(Enum):
@@ -169,5 +174,21 @@ class TaskExecutor:
         self, connection : Connection, service : Service, **service_handler_settings
     ) -> '_ServiceHandler':
         connection_devices = self.get_devices_from_connection(connection, exclude_managed_by_controller=True)
-        service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices)
-        return service_handler_class(service, self, **service_handler_settings)
+        try:
+            service_handler_class = get_service_handler_class(
+                self._service_handler_factory, service, connection_devices)
+            return service_handler_class(service, self, **service_handler_settings)
+        except (UnsatisfiedFilterException, UnsupportedFilterFieldException, UnsupportedFilterFieldValueException):
+            dict_connection_devices = {
+                cd_data.name : (cd_uuid, cd_data.name, {
+                    (device_driver, DeviceDriverEnum.Name(device_driver))
+                    for device_driver in cd_data.device_drivers
+                })
+                for cd_uuid,cd_data in connection_devices.items()
+            }
+            LOGGER.exception(
+                'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}'.format(
+                    grpc_message_list_to_json_string(service), grpc_message_list_to_json_string(connection),
+                    str(dict_connection_devices)
+                )
+            )
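The diagnostic added in the except-branch uses the protobuf-generated enum helper to pair each numeric driver value with its symbolic name. A hedged illustration (the values and names shown are assumptions about the deployed context.proto, given only for the example):

```(python)
# Hedged illustration of the DeviceDriverEnum.Name() lookup in the new except-branch.
from common.proto.context_pb2 import DeviceDriverEnum

# For a device reporting drivers {0, 1}, the logged set would contain pairs
# such as {(0, 'DEVICEDRIVER_UNDEFINED'), (1, 'DEVICEDRIVER_OPENCONFIG')};
# the exact names depend on the proto definition in use.
pairs = {(value, DeviceDriverEnum.Name(value)) for value in (0, 1)}
print(pairs)
```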
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime, re
from typing import Dict, List, Optional, Tuple
from .tools.FileSystem import create_folders
from .tools.HistogramData import HistogramData
from .tools.Plotter import plot_histogram
from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms

##### EXPERIMENT SETTINGS ##############################################################################################
EXPERIMENT_NAME = 'L2VPN with Emulated'
EXPERIMENT_ID = 'l2vpn-emu'
TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
TIME_STEP = '1m'
LABEL_FILTERS = {}

##### ENVIRONMENT SETTINGS #############################################################################################
PROM_ADDRESS = '127.0.0.1'
PROM_PORT = 9090
OUT_FOLDER = 'data/perf/'

##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
EXPERIMENT_ID += '/component-rpcs'
SERIES_MATCH = 'tfs_.+_rpc_.+_histogram_duration_bucket'
RE_SERIES_NAME = re.compile(r'^tfs_(.+)_rpc_(.+)_histogram_duration_bucket$')
SERIES_LABELS = []

SUBSYSTEMS_MAPPING = {
    'context': {
        'context'   : 'context',
        'topolog'   : 'topology',
        'device'    : 'device',
        'endpoint'  : 'device',
        'link'      : 'link',
        'service'   : 'service',
        'slice'     : 'slice',
        'policyrule': 'policyrule',
        'connection': 'connection',
    }
}

def get_subsystem(component : str, rpc_method : str) -> Optional[str]:
    return next(iter([
        subsystem
        for pattern,subsystem in SUBSYSTEMS_MAPPING.get(component, {}).items()
        if pattern in rpc_method
    ]), None)

def update_keys(component : str, rpc_method : str) -> Tuple[Tuple, Tuple]:
    subsystem = get_subsystem(component, rpc_method)
    collection_keys = (component, subsystem)
    histogram_keys = (rpc_method,)
    return collection_keys, histogram_keys

def get_plot_specs(folders : Dict[str, str], component : str, subsystem : Optional[str]) -> Tuple[str, str]:
    if subsystem is None:
        title = '{:s} - RPC Methods [{:s}]'.format(component.title(), EXPERIMENT_NAME)
        filepath = '{:s}/{:s}.png'.format(folders['png'], component)
    else:
        title = '{:s} - RPC Methods - {:s} [{:s}]'.format(component.title(), subsystem.title(), EXPERIMENT_NAME)
        filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], component, subsystem)
    return title, filepath

##### AUTOMATED CODE ###################################################################################################

def get_series_names(folders : Dict[str, str]) -> List[str]:
    series_names = get_prometheus_series_names(
        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
    )
    return series_names

def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
    m = RE_SERIES_NAME.match(series_name)
    if m is None:
        # pylint: disable=broad-exception-raised
        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
    extra_labels = m.groups()
    results = get_prometheus_range(
        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
    )
    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
    save_histograms(histograms, folders['csv'])
    return histograms

def main() -> None:
    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
    series_names = get_series_names(folders)

    for series_name in series_names:
        histograms = get_histogram_data(series_name, folders)
        for histogram_keys, histogram_data in histograms.items():
            collection_keys,histogram_keys = update_keys(*histogram_keys)
            histograms = histograms_collection.setdefault(collection_keys, dict())
            histograms[histogram_keys] = histogram_data

    for histogram_keys,histograms in histograms_collection.items():
        title, filepath = get_plot_specs(folders, *histogram_keys)
        plot_histogram(histograms, filepath, title=title)

if __name__ == '__main__':
    main()
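Note that get_subsystem groups RPCs by a case-sensitive substring test on the method name as it appears in the metric (lowercase), e.g.:

```(python)
# Quick check of get_subsystem's substring matching; the method names are
# illustrative lowercase strings as extracted from the metric names.
assert get_subsystem('context', 'listtopologyids') == 'topology'  # 'topolog' matches
assert get_subsystem('context', 'removeslice') == 'slice'
assert get_subsystem('device', 'getinitialconfig') is None        # component not mapped
```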
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime, re
from typing import Dict, List, Optional, Tuple
from .tools.FileSystem import create_folders
from .tools.HistogramData import HistogramData
from .tools.Plotter import plot_histogram
from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms

##### EXPERIMENT SETTINGS ##############################################################################################
EXPERIMENT_NAME = 'L2VPN with Emulated'
EXPERIMENT_ID = 'l2vpn-emu'
TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
TIME_STEP = '1m'
LABEL_FILTERS = {
    #'driver': 'emulated',
    #'operation': 'configure_device', # add_device / configure_device
    #'step': 'get_device',
}

##### ENVIRONMENT SETTINGS #############################################################################################
PROM_ADDRESS = '127.0.0.1'
PROM_PORT = 9090
OUT_FOLDER = 'data/perf/'

##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
EXPERIMENT_ID += '/dev-drv-details'
SERIES_MATCH = 'tfs_device_execution_details_histogram_duration_bucket'
RE_SERIES_NAME = re.compile(r'^tfs_device_execution_details_histogram_duration_bucket$')
SERIES_LABELS = ['driver', 'operation', 'step']

def update_keys(driver : str, operation : str, step : str) -> Tuple[Tuple, Tuple]:
    collection_keys = (driver, operation)
    histogram_keys = (step,)
    return collection_keys, histogram_keys

def get_plot_specs(folders : Dict[str, str], driver : str, operation : str) -> Tuple[str, str]:
    title = 'Device Driver - {:s} - {:s}'.format(driver.title(), operation.replace('_', '').title())
    filepath = '{:s}/{:s}-{:s}.png'.format(folders['png'], driver, operation)
    return title, filepath

##### AUTOMATED CODE ###################################################################################################

def get_series_names(folders : Dict[str, str]) -> List[str]:
    series_names = get_prometheus_series_names(
        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
    )
    return series_names

def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
    m = RE_SERIES_NAME.match(series_name)
    if m is None:
        # pylint: disable=broad-exception-raised
        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
    extra_labels = m.groups()
    results = get_prometheus_range(
        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
    )
    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
    save_histograms(histograms, folders['csv'])
    return histograms

def main() -> None:
    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
    series_names = get_series_names(folders)

    for series_name in series_names:
        histograms = get_histogram_data(series_name, folders)
        for histogram_keys, histogram_data in histograms.items():
            collection_keys,histogram_keys = update_keys(*histogram_keys)
            histograms = histograms_collection.setdefault(collection_keys, dict())
            histograms[histogram_keys] = histogram_data

    for histogram_keys,histograms in histograms_collection.items():
        title, filepath = get_plot_specs(folders, *histogram_keys)
        plot_histogram(histograms, filepath, title=title)

if __name__ == '__main__':
    main()
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime, re
from typing import Dict, List, Tuple
from .tools.FileSystem import create_folders
from .tools.HistogramData import HistogramData
from .tools.Plotter import plot_histogram
from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms

##### EXPERIMENT SETTINGS ##############################################################################################
EXPERIMENT_NAME = 'L2VPN with Emulated'
EXPERIMENT_ID = 'l2vpn-emu'
TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
TIME_STEP = '1m'
LABEL_FILTERS = {
    #'driver': 'emulated',
}

##### ENVIRONMENT SETTINGS #############################################################################################
PROM_ADDRESS = '127.0.0.1'
PROM_PORT = 9090
OUT_FOLDER = 'data/perf/'

##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
EXPERIMENT_ID += '/dev-drv-methods'
SERIES_MATCH = 'tfs_device_driver_.+_histogram_duration_bucket'
RE_SERIES_NAME = re.compile(r'^tfs_device_driver_(.+)_histogram_duration_bucket$')
SERIES_LABELS = ['driver']

def update_keys(driver : str, method : str) -> Tuple[Tuple, Tuple]:
    collection_keys = (driver,)
    histogram_keys = (method,)
    return collection_keys, histogram_keys

def get_plot_specs(folders : Dict[str, str], driver : str) -> Tuple[str, str]:
    title = 'Device Driver - {:s}'.format(driver.title())
    filepath = '{:s}/{:s}.png'.format(folders['png'], driver)
    return title, filepath

##### AUTOMATED CODE ###################################################################################################

def get_series_names(folders : Dict[str, str]) -> List[str]:
    series_names = get_prometheus_series_names(
        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
    )
    return series_names

def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
    m = RE_SERIES_NAME.match(series_name)
    if m is None:
        # pylint: disable=broad-exception-raised
        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
    extra_labels = m.groups()
    results = get_prometheus_range(
        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
    )
    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
    save_histograms(histograms, folders['csv'])
    return histograms

def main() -> None:
    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
    series_names = get_series_names(folders)

    for series_name in series_names:
        histograms = get_histogram_data(series_name, folders)
        for histogram_keys, histogram_data in histograms.items():
            collection_keys,histogram_keys = update_keys(*histogram_keys)
            histograms = histograms_collection.setdefault(collection_keys, dict())
            histograms[histogram_keys] = histogram_data

    for histogram_keys,histograms in histograms_collection.items():
        title, filepath = get_plot_specs(folders, *histogram_keys)
        plot_histogram(histograms, filepath, title=title)

if __name__ == '__main__':
    main()
# Tool: Perf Plots Generator

Simple tool to gather performance data from Prometheus and produce histogram plots.

## Example:
- Ensure your MicroK8s includes the monitoring addon and your deployment specifies the service monitors.
- Deploy the TeraFlowSDN controller with your specific settings:
```(bash)
cd ~/tfs-ctrl
source my_deploy.sh
./deploy.sh
```
- Execute the test you want to measure.
- Select the appropriate script:
  - Device_Driver_Methods : reports Device Driver Methods
  - Device_Driver_Details : reports Device Add/Configure Details
  - Service_Handler_Methods : reports Service Handler Methods
  - Component_RPC_Methods : reports Component RPC Methods
- Tune the experiment settings.
- Execute the report script:
```(bash)
PYTHONPATH=./src python -m tests.tools.perf_plots.<script>
```
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime, re
from typing import Dict, List, Tuple
from .tools.FileSystem import create_folders
from .tools.HistogramData import HistogramData
from .tools.Plotter import plot_histogram
from .tools.Prometheus import get_prometheus_range, get_prometheus_series_names
from .tools.Histogram import results_to_histograms, save_histograms, unaccumulate_histograms

##### EXPERIMENT SETTINGS ##############################################################################################
EXPERIMENT_NAME = 'L2VPN with Emulated'
EXPERIMENT_ID = 'l2vpn-emu'
TIME_START = datetime.datetime(2023, 5, 4, 6, 45, 0, 0, tzinfo=datetime.timezone.utc)
TIME_END = datetime.datetime(2023, 5, 4, 10, 15, 0, 0, tzinfo=datetime.timezone.utc)
TIME_STEP = '1m'
LABEL_FILTERS = {
    #'handler': 'l2nm_emulated',
}

##### ENVIRONMENT SETTINGS #############################################################################################
PROM_ADDRESS = '127.0.0.1'
PROM_PORT = 9090
OUT_FOLDER = 'data/perf/'

##### PLOT-SPECIFIC CUSTOMIZATIONS #####################################################################################
EXPERIMENT_ID += '/svc-hdl-methods'
SERIES_MATCH = 'tfs_service_handler_.+_histogram_duration_bucket'
RE_SERIES_NAME = re.compile(r'^tfs_service_handler_(.+)_histogram_duration_bucket$')
SERIES_LABELS = ['handler']

def update_keys(handler : str, method : str) -> Tuple[Tuple, Tuple]:
    collection_keys = (handler,)
    histogram_keys = (method,)
    return collection_keys, histogram_keys

def get_plot_specs(folders : Dict[str, str], handler : str) -> Tuple[str, str]:
    title = 'Service Handler - {:s}'.format(handler.title())
    filepath = '{:s}/{:s}.png'.format(folders['png'], handler)
    return title, filepath

##### AUTOMATED CODE ###################################################################################################

def get_series_names(folders : Dict[str, str]) -> List[str]:
    series_names = get_prometheus_series_names(
        PROM_ADDRESS, PROM_PORT, SERIES_MATCH, TIME_START, TIME_END,
        raw_json_filepath='{:s}/_series.json'.format(folders['json'])
    )
    return series_names

def get_histogram_data(series_name : str, folders : Dict[str, str]) -> Dict[Tuple, HistogramData]:
    m = RE_SERIES_NAME.match(series_name)
    if m is None:
        # pylint: disable=broad-exception-raised
        raise Exception('Unparsable series name: {:s}'.format(str(series_name)))
    extra_labels = m.groups()
    results = get_prometheus_range(
        PROM_ADDRESS, PROM_PORT, series_name, LABEL_FILTERS, TIME_START, TIME_END, TIME_STEP,
        raw_json_filepath='{:s}/_raw_{:s}.json'.format(folders['json'], series_name)
    )
    histograms = results_to_histograms(results, SERIES_LABELS, extra_labels=extra_labels)
    unaccumulate_histograms(histograms, process_bins=True, process_timestamps=False)
    save_histograms(histograms, folders['csv'])
    return histograms

def main() -> None:
    histograms_collection : Dict[Tuple, Dict[Tuple, HistogramData]] = dict()
    folders = create_folders(OUT_FOLDER, EXPERIMENT_ID)
    series_names = get_series_names(folders)

    for series_name in series_names:
        histograms = get_histogram_data(series_name, folders)
        for histogram_keys, histogram_data in histograms.items():
            collection_keys,histogram_keys = update_keys(*histogram_keys)
            histograms = histograms_collection.setdefault(collection_keys, dict())
            histograms[histogram_keys] = histogram_data

    for histogram_keys,histograms in histograms_collection.items():
        title, filepath = get_plot_specs(folders, *histogram_keys)
        plot_histogram(histograms, filepath, title=title)

if __name__ == '__main__':
    main()
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict

def create_folders(root_folder : str, experiment_id : str) -> Dict[str, str]:
    experiment_folder = root_folder + '/' + experiment_id
    folders = {
        'csv' : experiment_folder + '/csv' ,
        'json': experiment_folder + '/json',
        'png' : experiment_folder + '/png' ,
    }
    for folder in folders.values():
        pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
    return folders
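A quick usage sketch: create_folders builds the per-experiment output tree once and hands the csv/json/png paths to the report scripts (paths below are examples):

```(python)
# Usage sketch for create_folders. Note the doubled slash produced by the
# trailing '/' in OUT_FOLDER is harmless on POSIX filesystems.
folders = create_folders('data/perf/', 'l2vpn-emu/component-rpcs')
print(folders['png'])  # data/perf//l2vpn-emu/component-rpcs/png
```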
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from typing import Dict, List, Tuple
from .HistogramData import HistogramData

def results_to_histograms(
    results : List[Dict], key_labels : List[str], extra_labels : List[str] = []
) -> Dict[Tuple, HistogramData]:
    histograms : Dict[Tuple, HistogramData] = dict()
    for result in results:
        metric : Dict = result['metric']
        labels = [metric[l] for l in key_labels]
        if len(extra_labels) > 0: labels.extend(extra_labels)
        histogram_key = tuple(labels)
        histogram = histograms.get(histogram_key)
        if histogram is None:
            histogram = histograms.setdefault(
                histogram_key, HistogramData(timestamps=set(), bins=set(), data=dict()))
        bin_ = float(metric['le'])
        histogram.bins.add(bin_)
        values : List[Tuple[int, str]] = result['values']
        for timestamp,count in values:
            histogram.timestamps.add(timestamp)
            histogram.data.setdefault(timestamp, dict())[bin_] = int(count)
    return histograms

def unaccumulate_histogram(
    histogram : HistogramData, process_bins : bool = True, process_timestamps : bool = True
) -> None:
    timestamps = sorted(histogram.timestamps)
    bins = sorted(histogram.bins)
    accumulated_over_time = {b:0 for b in bins}
    for timestamp in timestamps:
        bin_to_count = histogram.data.get(timestamp)
        if bin_to_count is None: continue
        accumulated_over_bins = 0
        for bin_ in bins:
            count = bin_to_count[bin_]
            if process_bins:
                count -= accumulated_over_bins
                accumulated_over_bins += count
            if process_timestamps:
                count -= accumulated_over_time[bin_]
                accumulated_over_time[bin_] += count
            bin_to_count[bin_] = count

def unaccumulate_histograms(
    histograms : Dict[Tuple, HistogramData], process_bins : bool = True, process_timestamps : bool = True
) -> None:
    for histogram in histograms.values():
        unaccumulate_histogram(histogram, process_bins=process_bins, process_timestamps=process_timestamps)

def save_histogram(filepath : str, histogram : HistogramData) -> None:
    timestamps = sorted(histogram.timestamps)
    bins = sorted(histogram.bins)
    header = [''] + [str(b) for b in bins]
    with open(filepath, 'w', encoding='UTF-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for timestamp in timestamps:
            bin_to_count = histogram.data.get(timestamp, {})
            writer.writerow([timestamp] + [
                str(bin_to_count.get(bin_, 0))
                for bin_ in bins
            ])

def save_histograms(histograms : Dict[Tuple, HistogramData], data_folder : str) -> None:
    for histogram_keys, histogram_data in histograms.items():
        filepath = '{:s}/{:s}.csv'.format(data_folder, '__'.join(histogram_keys))
        save_histogram(filepath, histogram_data)
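To see what unaccumulate_histogram does, take a single timestamp of a Prometheus histogram, whose le-buckets are cumulative over bins; with process_bins=True the counts become per-bin:

```(python)
# Worked example: cumulative bucket counts {0.1: 3, 0.5: 7, 1.0: 10} mean
# 3 samples took <=0.1s, 7 took <=0.5s, 10 took <=1.0s; per-bin counts are 3, 4, 3.
h = HistogramData(
    timestamps={1683180000}, bins={0.1, 0.5, 1.0},
    data={1683180000: {0.1: 3, 0.5: 7, 1.0: 10}})
unaccumulate_histogram(h, process_bins=True, process_timestamps=False)
assert h.data[1683180000] == {0.1: 3, 0.5: 4, 1.0: 3}
```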
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Set

@dataclass
class HistogramData:
    timestamps : Set[int]
    bins : Set[float]
    data : Dict[int, Dict[float, int]]
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
from typing import Dict, Optional, Tuple
from .HistogramData import HistogramData

def plot_histogram(
    histograms : Dict[Tuple, HistogramData], filepath : str,
    title : Optional[str] = None, label_separator : str = ' ', dpi : int = 600,
    legend_loc : str = 'best', grid : bool = True
) -> None:
    # plot the cumulative histogram
    _, ax = plt.subplots(figsize=(8, 8))
    num_series = 0
    for histogram_keys, histogram_data in histograms.items():
        bins = sorted(histogram_data.bins)
        last_timestamp = max(histogram_data.timestamps)
        counts = histogram_data.data.get(last_timestamp)
        counts = [int(counts[bin_]) for bin_ in bins]
        if sum(counts) == 0: continue
        num_series += 1
        bins.insert(0, 0)
        bins = np.array(bins).astype(float)
        counts = np.array(counts).astype(float)
        assert len(bins) == len(counts) + 1
        centroids = (bins[1:] + bins[:-1]) / 2
        label = label_separator.join(histogram_keys)
        ax.hist(centroids, bins=bins, weights=counts, range=(min(bins), max(bins)), density=True,
                histtype='step', cumulative=True, label=label)
    if num_series == 0: return
    ax.grid(grid)
    ax.legend(loc=legend_loc)
    if title is not None: ax.set_title(title)
    ax.set_xlabel('seconds')
    ax.set_ylabel('Likelihood of occurrence')
    plt.xscale('log')
    plt.savefig(filepath, dpi=(dpi))
    plt.show()
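plot_histogram expects per-bin (already unaccumulated) counts and re-accumulates them via cumulative=True, so each series renders as a CDF over the log-scaled duration axis. A minimal usage sketch with invented data:

```(python)
# Usage sketch for plot_histogram; histogram data invented for illustration.
h = HistogramData(
    timestamps={1683180000}, bins={0.1, 0.5, 1.0},
    data={1683180000: {0.1: 3, 0.5: 4, 1.0: 3}})
plot_histogram({('emulated', 'getconfig'): h}, '/tmp/example.png', title='Example CDF')
```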
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, requests, time
from datetime import datetime
from typing import Dict, List, Optional

def get_prometheus_series_names(
    address : str, port : int, metric_match : str, time_start : datetime, time_end : datetime, timeout : int = 10,
    raw_json_filepath : Optional[str] = None
) -> List[str]:
    str_url = 'http://{:s}:{:d}/api/v1/label/__name__/values'.format(address, port)
    params = {
        'match[]': '{{__name__=~"{:s}"}}'.format(metric_match),
        'start': time.mktime(time_start.timetuple()),
        'end' : time.mktime(time_end.timetuple()),
    }
    response = requests.get(str_url, params=params, timeout=timeout)
    results = response.json()
    if raw_json_filepath is not None:
        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
            f.write(json.dumps(results, sort_keys=True))
    assert results['status'] == 'success'
    return results['data']

def get_prometheus_range(
    address : str, port : int, metric_name : str, labels : Dict[str, str], time_start : datetime, time_end : datetime,
    time_step : str, timeout : int = 10, raw_json_filepath : Optional[str] = None
) -> List[Dict]:
    str_url = 'http://{:s}:{:d}/api/v1/query_range'.format(address, port)
    str_query = metric_name
    if len(labels) > 0:
        str_labels = ', '.join(['{:s}="{:s}"'.format(name, value) for name,value in labels.items()])
        str_query += '{{{:s}}}'.format(str_labels)
    params = {
        'query': str_query,
        'start': time.mktime(time_start.timetuple()),
        'end' : time.mktime(time_end.timetuple()),
        'step' : time_step,
    }
    response = requests.get(str_url, params=params, timeout=timeout)
    results = response.json()
    if raw_json_filepath is not None:
        with open(raw_json_filepath, 'w', encoding='UTF-8') as f:
            f.write(json.dumps(results, sort_keys=True))
    assert results['status'] == 'success'
    assert results['data']['resultType'] == 'matrix'
    return results['data']['result']
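A usage sketch for get_prometheus_range; the metric name is illustrative, and the address/port are the defaults used by the report scripts:

```(python)
# Usage sketch; each result entry is a Prometheus "matrix" element:
# {'metric': {...labels...}, 'values': [[timestamp, count], ...]}
from datetime import datetime, timezone

results = get_prometheus_range(
    '127.0.0.1', 9090, 'tfs_device_driver_getconfig_histogram_duration_bucket',
    {'driver': 'emulated'},
    datetime(2023, 5, 4, 6, 45, tzinfo=timezone.utc),
    datetime(2023, 5, 4, 10, 15, tzinfo=timezone.utc), '1m')
```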
# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.