Commits (4)
......@@ -36,7 +36,7 @@ spec:
- containerPort: 9192
env:
- name: LOG_LEVEL
value: "INFO"
value: "DEBUG"
- name: SLICE_GROUPING
value: "DISABLE"
envFrom:
......
......@@ -106,8 +106,68 @@ class DescriptorLoader:
self.__links = self.__descriptors.get('links' , [])
self.__services = self.__descriptors.get('services' , [])
self.__slices = self.__descriptors.get('slices' , [])
self.__slices = self.__descriptors.get('ietf-network-slice-service:network-slice-services', {})
self.__connections = self.__descriptors.get('connections', [])
if self.__slices:
json_out = {"slices": [
{
"slice_id": {
"context_id": {"context_uuid": {"uuid": "admin"}},
"slice_uuid": {}
},
"name": {},
"slice_config": {"config_rules": [
{"action": 1, "custom": {"resource_key": "/settings", "resource_value": {
"address_families": ["IPV4"], "bgp_as": 65000, "bgp_route_target": "65000:333", "mtu": 1512
}}}
]},
"slice_constraints": [
{"sla_capacity": {"capacity_gbps": 20.0}},
{"sla_availability": {"availability": 20.0, "num_disjoint_paths": 1, "all_active": True}},
{"sla_isolation": {"isolation_level": [0]}}
],
"slice_endpoint_ids": [
],
"slice_status": {"slice_status": 1}
}
]}
for slice_service in self.__slices["slice-service"]:
for slice in json_out["slices"]:
slice["slice_id"]["slice_uuid"] = { "uuid": slice_service["id"]}
slice["name"] = slice_service["description"]
sdp = slice_service["sdps"]["sdp"]
for elemento in sdp:
attcircuits = elemento["attachment-circuits"]["attachment-circuit"]
for attcircuit in attcircuits:
resource_key = "/device[{sdp_id}]/endpoint[{endpoint_id}]/settings".format(sdp_id = elemento["id"], endpoint_id = attcircuit["ac-tp-id"])
vlan_id = 0  # default when no ietf-nss:vlan-id tag is present
for tag in attcircuit['ac-tags']['ac-tag']:
if tag.get('tag-type') == 'ietf-nss:vlan-id':
vlan_id = tag.get('value')
slice["slice_config"]["config_rules"].append( {"action": 1, "custom": {"resource_key": resource_key, "resource_value": {
"router_id": elemento.get("node-id",[]), "sub_interface_index": 0, "vlan_id": vlan_id
}}})
slice["slice_endpoint_ids"].append({
"device_id": {"device_uuid": {"uuid": elemento["id"]}},
"endpoint_uuid": {"uuid": attcircuit["ac-tp-id"]},
"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}},
"topology_uuid": {"uuid": "admin"}}
})
slice["slice_constraints"].append({"endpoint_location": {
"endpoint_id": {"device_id": {"device_uuid": {"uuid": elemento["id"]}}, "endpoint_uuid": {"uuid": attcircuit["ac-tp-id"]}},
"location": {"region": "4"}
}})
# Convert to output JSON
#json_output = json.dumps(json_out, indent=2)
self.__slices = json_out.get('slices' , [])
self.__contexts_add = None
self.__topologies_add = None
self.__devices_add = None
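The loop above flattens each SDP/attachment-circuit pair of the IETF 'network-slice-services' descriptor into a TeraFlow slice endpoint plus a per-endpoint config rule. A minimal standalone sketch of that mapping for a single attachment circuit (the helper name and sample values are illustrative, not part of the codebase):

def map_attachment_circuit(sdp: dict, attcircuit: dict):
    # Hypothetical helper mirroring the loop above for one attachment circuit.
    vlan_id = 0
    for tag in attcircuit['ac-tags']['ac-tag']:
        if tag.get('tag-type') == 'ietf-nss:vlan-id':
            vlan_id = tag.get('value')
    resource_key = '/device[{:s}]/endpoint[{:s}]/settings'.format(sdp['id'], attcircuit['ac-tp-id'])
    config_rule = {'action': 1, 'custom': {'resource_key': resource_key, 'resource_value': {
        'router_id': sdp.get('node-id', ''), 'sub_interface_index': 0, 'vlan_id': vlan_id}}}
    endpoint_id = {
        'device_id': {'device_uuid': {'uuid': sdp['id']}},
        'endpoint_uuid': {'uuid': attcircuit['ac-tp-id']},
        'topology_id': {'context_id': {'context_uuid': {'uuid': 'admin'}}, 'topology_uuid': {'uuid': 'admin'}},
    }
    return config_rule, endpoint_id

# Example input shaped like one 'sdp' entry of the descriptor:
sdp = {'id': 'R1', 'node-id': '10.0.0.1', 'attachment-circuits': {'attachment-circuit': [
    {'ac-tp-id': '1/1', 'ac-tags': {'ac-tag': [{'tag-type': 'ietf-nss:vlan-id', 'value': 100}]}}]}}
for ac in sdp['attachment-circuits']['attachment-circuit']:
    rule, endpoint = map_attachment_circuit(sdp, ac)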
......@@ -195,7 +255,7 @@ class DescriptorLoader:
_slices = {}
for slice_ in self.__slices:
context_uuid = slice_['slice_id']['context_id']['context_uuid']['uuid']
_slices.setdefault(context_uuid, []).append(slice_)
_slices.setdefault(context_uuid, []).append(slice_) # the IETF format does not provide a context_uuid; should it be set manually?
return _slices
@property
......@@ -236,8 +296,7 @@ class DescriptorLoader:
self.__ctx_cli.connect()
self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add )
self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add)
self._process_descr('controller', 'add', self.__ctx_cli.SetDevice, Device, controllers )
self._process_descr('device', 'add', self.__ctx_cli.SetDevice, Device, network_devices )
self._process_descr('device', 'add', self.__ctx_cli.SetDevice, Device, self.__devices )
self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, self.__links )
self._process_descr('service', 'add', self.__ctx_cli.SetService, Service, self.__services )
self._process_descr('slice', 'add', self.__ctx_cli.SetSlice, Slice, self.__slices )
......@@ -265,29 +324,24 @@ class DescriptorLoader:
self.__services_add = get_descriptors_add_services(self.__services)
self.__slices_add = get_descriptors_add_slices(self.__slices)
controllers_add, network_devices_add = split_controllers_and_network_devices(self.__devices_add)
self.__ctx_cli.connect()
self.__dev_cli.connect()
self.__svc_cli.connect()
self.__slc_cli.connect()
self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add )
self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add)
self._process_descr('controller', 'add', self.__dev_cli.AddDevice, Device, controllers_add )
self._process_descr('device', 'add', self.__dev_cli.AddDevice, Device, network_devices_add )
self._process_descr('device', 'config', self.__dev_cli.ConfigureDevice, Device, self.__devices_config)
self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, self.__links )
self._process_descr('service', 'add', self.__svc_cli.CreateService, Service, self.__services_add )
self._process_descr('service', 'update', self.__svc_cli.UpdateService, Service, self.__services )
self._process_descr('slice', 'add', self.__slc_cli.CreateSlice, Slice, self.__slices_add )
self._process_descr('slice', 'update', self.__slc_cli.UpdateSlice, Slice, self.__slices )
# By default the Context component automatically assigns devices and links to topologies based on their
# endpoints, and assigns topologies, services, and slices to contexts based on their identifiers.
# The following statement is useless; up to now, no use case requires assigning a topology, service, or
# slice to a different context.
self._process_descr('context', 'add', self.__ctx_cli.SetContext, Context, self.__contexts_add )
self._process_descr('topology', 'add', self.__ctx_cli.SetTopology, Topology, self.__topologies_add)
self._process_descr('device', 'add', self.__dev_cli.AddDevice, Device, self.__devices_add )
self._process_descr('device', 'config', self.__dev_cli.ConfigureDevice, Device, self.__devices_config)
self._process_descr('link', 'add', self.__ctx_cli.SetLink, Link, self.__links )
self._process_descr('service', 'add', self.__svc_cli.CreateService, Service, self.__services_add )
self._process_descr('service', 'update', self.__svc_cli.UpdateService, Service, self.__services )
self._process_descr('slice', 'add', self.__slc_cli.CreateSlice, Slice, self.__slices_add )
self._process_descr('slice', 'update', self.__slc_cli.UpdateSlice, Slice, self.__slices )
# Updating context and topology is useless:
# - devices and links are assigned to topologies automatically by Context component
# - topologies, services, and slices are assigned to contexts automatically by Context component
#self._process_descr('context', 'update', self.__ctx_cli.SetContext, Context, self.__contexts )
# In some cases, it might be needed to assign devices and links to multiple topologies; the
......
......@@ -59,8 +59,6 @@ def format_custom_config_rules(config_rules : List[Dict]) -> List[Dict]:
if isinstance(custom_resource_value, (dict, list)):
custom_resource_value = json.dumps(custom_resource_value, sort_keys=True, indent=0)
config_rule['custom']['resource_value'] = custom_resource_value
elif not isinstance(custom_resource_value, str):
config_rule['custom']['resource_value'] = str(custom_resource_value)
return config_rules
def format_device_custom_config_rules(device : Dict) -> Dict:
......
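With the elif branch removed, format_custom_config_rules only serializes dict/list resource values; other value types are left untouched. A short sketch of the remaining behavior (the sample rule is illustrative):

import json

config_rule = {'action': 1, 'custom': {'resource_key': '/settings',
    'resource_value': {'mtu': 1512, 'bgp_as': 65000}}}
custom_resource_value = config_rule['custom']['resource_value']
if isinstance(custom_resource_value, (dict, list)):
    # dict/list values are flattened into a canonical JSON string
    config_rule['custom']['resource_value'] = json.dumps(custom_resource_value, sort_keys=True, indent=0)
# Plain strings and other scalars are no longer str()-converted; they pass through unchanged.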
......@@ -15,7 +15,7 @@
import logging, lxml.etree as ET
from typing import Any, Dict, List, Tuple
from .Namespace import NAMESPACES
from .Tools import add_value_from_tag
from .Tools import add_value_from_tag, add_int_from_tag
LOGGER = logging.getLogger(__name__)
......@@ -55,6 +55,8 @@ XPATH_PORTS = "//ocp:components/ocp:component"
def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
response = []
parent_types = {}
# Initialize count to 0; it is used as the physical index of each component
count = 0
for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES):
LOGGER.info('xml_component inventario = {:s}'.format(str(ET.tostring(xml_component))))
inventory = {}
......@@ -63,6 +65,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
inventory['class'] = ''
inventory['attributes'] = {}
component_reference = []
component_name = xml_component.find('ocp:name', namespaces=NAMESPACES)
if component_name is None or component_name.text is None: continue
......@@ -83,6 +86,34 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
if inventory['class'] == 'CPU' or inventory['class'] == 'STORAGE': continue
## Added (after the name and class checks)
# Physical index: position of the component in the component list
add_int_from_tag(inventory['attributes'], 'physical-index', count)
count += 1
## FRU: record whether the component is a field-replaceable unit
if inventory['class'] == 'FRU':
component_isfru = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES)
add_value_from_tag(inventory['attributes'], 'isfru', component_isfru)
##ID
component_id = xml_component.find('ocp:state/ocp:id', namespaces=NAMESPACES)
if component_id is not None:
add_value_from_tag(inventory['attributes'], 'id', component_id)
##OPER_STATUS
component_oper_status = xml_component.find('ocp:state/ocp:oper-status', namespaces=NAMESPACES)
if component_oper_status is not None:
add_value_from_tag(inventory['attributes'], 'oper-status', component_oper_status)
##MODEL_ID
component_model_id = xml_component.find('ocp:state/ocp:entity-id', namespaces=NAMESPACES)
if component_model_id is not None:
add_value_from_tag(inventory['attributes'], 'model-id', component_model_id)
##EMPTY
component_empty = xml_component.find('ocp:state/ocp:empty', namespaces=NAMESPACES)
if component_empty is not None:
add_value_from_tag(inventory['attributes'], 'empty', component_empty)
......
......@@ -26,6 +26,11 @@ def add_value_from_tag(target : Dict, field_name: str, field_value : ET.Element,
if cast is not None: field_value = cast(field_value)
target[field_name] = field_value
def add_int_from_tag(target : Dict, field_name: str, field_value : int, cast=None) -> None:
if field_value is None: return
if cast is not None: field_value = cast(field_value)
target[field_name] = field_value
def add_value_from_collection(target : Dict, field_name: str, field_value : Collection) -> None:
if field_value is None or len(field_value) == 0: return
target[field_name] = field_value
......
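A quick usage sketch of the new helper (values are illustrative): unlike add_value_from_tag, which takes an lxml element, add_int_from_tag stores a plain Python value directly and skips None.

attributes = {}
add_int_from_tag(attributes, 'physical-index', 0)      # stores 0
add_int_from_tag(attributes, 'physical-index', None)   # skipped: value is None
add_int_from_tag(attributes, 'slot', '4', cast=int)    # cast applied, stores 4
# attributes == {'physical-index': 0, 'slot': 4}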
......@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# group slices and group resources to avoid issuing additional configurations
from typing import Optional
import grpc, json, logging #, deepdiff
from common.proto.context_pb2 import (
......@@ -30,33 +32,34 @@ from interdomain.client.InterdomainClient import InterdomainClient
from service.client.ServiceClient import ServiceClient
from .slice_grouper.SliceGrouper import SliceGrouper
LOGGER = logging.getLogger(__name__)
LOGGER = logging.getLogger(__name__) # logger named after the current module
METRICS_POOL = MetricsPool('Slice', 'RPC')
class SliceServiceServicerImpl(SliceServiceServicer):
class SliceServiceServicerImpl(SliceServiceServicer): # implements the gRPC service defined by SliceServiceServicer
def __init__(self):
LOGGER.debug('Creating Servicer...')
self._slice_grouper = SliceGrouper()
self._slice_grouper = SliceGrouper() # create a SliceGrouper instance
LOGGER.debug('Servicer Created')
def create_update(self, request : Slice) -> SliceId:
# Set slice status to "SLICESTATUS_PLANNED" to ensure rest of components are aware the slice is
# being modified.
context_client = ContextClient()
slice_ro : Optional[Slice] = get_slice_by_id(context_client, request.slice_id, rw_copy=False)
slice_ro : Optional[Slice] = get_slice_by_id(context_client, request.slice_id, rw_copy=False) # retrieve the slice identified by the request's SliceId
slice_rw = Slice()
slice_rw = Slice() # create a read-write Slice, copied from the stored slice or, if none exists, from the request
slice_rw.CopyFrom(request if slice_ro is None else slice_ro)
if len(request.name) > 0: slice_rw.name = request.name
if len(request.name) > 0: slice_rw.name = request.name # update the name and status of the read-write slice
slice_rw.slice_owner.CopyFrom(request.slice_owner) # pylint: disable=no-member
slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member
# copy endpoint IDs, constraints, and config rules from the request into the slice
copy_endpoint_ids(request.slice_endpoint_ids, slice_rw.slice_endpoint_ids ) # pylint: disable=no-member
copy_constraints (request.slice_constraints, slice_rw.slice_constraints ) # pylint: disable=no-member
copy_config_rules(request.slice_config.config_rules, slice_rw.slice_config.config_rules) # pylint: disable=no-member
slice_id_with_uuids = context_client.SetSlice(slice_rw)
slice_id_with_uuids = context_client.SetSlice(slice_rw) # create or update the slice in the Context database
if len(slice_rw.slice_endpoint_ids) < 2: # pylint: disable=no-member
# unable to identify the kind of slice; just update endpoints, constraints and config rules
......@@ -65,8 +68,9 @@ class SliceServiceServicerImpl(SliceServiceServicer):
reply = context_client.SetSlice(slice_rw)
context_client.close()
return reply
# with fewer than two endpoints the kind of slice cannot be identified; the update above is applied and the SliceId is returned
slice_with_uuids = context_client.GetSlice(slice_id_with_uuids)
slice_with_uuids = context_client.GetSlice(slice_id_with_uuids) # retrieve the updated slice
#LOGGER.info('json_current_slice = {:s}'.format(str(json_current_slice)))
#json_updated_slice = grpc_message_to_json(request)
......@@ -74,8 +78,8 @@ class SliceServiceServicerImpl(SliceServiceServicer):
#changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice)
#LOGGER.info('changes = {:s}'.format(str(changes)))
if is_inter_domain(context_client, slice_with_uuids.slice_endpoint_ids):
interdomain_client = InterdomainClient()
if is_inter_domain(context_client, slice_with_uuids.slice_endpoint_ids): # if the slice spans multiple domains
interdomain_client = InterdomainClient() # client of the Interdomain component, used for multi-domain slices
slice_id = interdomain_client.RequestSlice(slice_with_uuids)
slice_ = context_client.GetSlice(slice_id)
slice_active = Slice()
......@@ -97,10 +101,10 @@ class SliceServiceServicerImpl(SliceServiceServicer):
service_client = ServiceClient()
try:
_service = context_client.GetService(service_id)
_service = context_client.GetService(service_id) # retrieve the service information if it already exists
except: # pylint: disable=bare-except
# pylint: disable=no-member
service_request = Service()
service_request = Service() # otherwise, create a new service
service_request.service_id.CopyFrom(service_id)
service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN
service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
......@@ -109,6 +113,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
service_request = Service()
service_request.CopyFrom(_service)
# update the service with the slice information
# pylint: disable=no-member
copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids)
copy_constraints(request.slice_constraints, service_request.service_constraints)
......@@ -162,11 +167,11 @@ class SliceServiceServicerImpl(SliceServiceServicer):
slice_active.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member
context_client.SetSlice(slice_active)
service_client.close()
context_client.close()
service_client.close() # release the Service client resources
context_client.close() # release the Context (TeraFlow database) client
return slice_id
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER) # adds metrics collection and safe error handling to the RPC
def CreateSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId:
#try:
# slice_ = context_client.GetSlice(request.slice_id)
......@@ -196,7 +201,7 @@ class SliceServiceServicerImpl(SliceServiceServicer):
@safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
def DeleteSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty:
context_client = ContextClient()
context_client = ContextClient() # client used to retrieve the slice information
try:
_slice = context_client.GetSlice(request)
except: # pylint: disable=bare-except
......@@ -205,9 +210,11 @@ class SliceServiceServicerImpl(SliceServiceServicer):
_slice_rw = Slice()
_slice_rw.CopyFrom(_slice)
# change the slice status
_slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member
context_client.SetSlice(_slice_rw)
# delete the slice, handling the inter-domain case, and ungroup the deleted slice
# delete the services associated with the slice
if is_inter_domain(context_client, _slice.slice_endpoint_ids):
interdomain_client = InterdomainClient()
slice_id = interdomain_client.DeleteSlice(request)
......
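For context, a minimal sketch of the kind of request create_update handles, built with the field names used above (identifiers and endpoints are illustrative): the method stores the slice as SLICESTATUS_PLANNED, provisions the underlying connectivity, and finally marks it SLICESTATUS_ACTIVE.

from common.proto.context_pb2 import Slice, SliceStatusEnum

request = Slice()
request.slice_id.context_id.context_uuid.uuid = 'admin'
request.slice_id.slice_uuid.uuid = 'slice-example'
request.name = 'slice-example'
request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED
for device_uuid, endpoint_uuid in [('R1', '1/1'), ('R2', '1/2')]:
    endpoint_id = request.slice_endpoint_ids.add()
    endpoint_id.device_id.device_uuid.uuid = device_uuid
    endpoint_id.endpoint_uuid.uuid = endpoint_uuid
# With two or more endpoints, create_update derives a Service from the slice,
# requests it through ServiceClient (or InterdomainClient for multi-domain slices),
# and sets the slice status to SLICESTATUS_ACTIVE once provisioned.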
......@@ -14,7 +14,7 @@
import logging, pandas, threading
from typing import Dict, Optional, Tuple
from sklearn.cluster import KMeans
from sklearn.cluster import KMeans # clustering algorithm from scikit-learn (a machine-learning library)
from common.proto.context_pb2 import Slice
from common.tools.grpc.Tools import grpc_message_to_json_string
from .Constants import SLICE_GROUPS
......@@ -27,30 +27,30 @@ LOGGER = logging.getLogger(__name__)
class SliceGrouper:
def __init__(self) -> None:
self._lock = threading.Lock()
self._is_enabled = is_slice_grouping_enabled()
self._lock = threading.Lock() # guards concurrent access
self._is_enabled = is_slice_grouping_enabled() # is slice grouping enabled?
LOGGER.info('Slice Grouping: {:s}'.format('ENABLED' if self._is_enabled else 'DISABLED'))
if not self._is_enabled: return
metrics_exporter = MetricsExporter()
metrics_exporter = MetricsExporter() # MetricsExporter instance
metrics_exporter.create_table()
self._slice_groups = create_slice_groups(SLICE_GROUPS)
self._slice_groups = create_slice_groups(SLICE_GROUPS) # pre-defined slice groups
# Initialize and fit K-Means with the pre-defined clusters we want, i.e., one per slice group
df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps'])
k_means = KMeans(n_clusters=df_groups.shape[0])
df_groups = pandas.DataFrame(SLICE_GROUPS, columns=['name', 'availability', 'capacity_gbps']) # DataFrame describing the groups
k_means = KMeans(n_clusters=df_groups.shape[0]) # model used for the clustering
k_means.fit(df_groups[['availability', 'capacity_gbps']])
df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])
self._k_means = k_means
self._df_groups = df_groups
self._group_mapping : Dict[str, Dict] = {
self._group_mapping : Dict[str, Dict] = { # maps each group name to its attributes
group['name']:{k:v for k,v in group.items() if k != 'name'}
for group in list(df_groups.to_dict('records'))
for group in list(df_groups.to_dict('records')) # one record per group
}
label_to_group = {}
label_to_group = {} # maps cluster labels to group names
for group_name,group_attrs in self._group_mapping.items():
label = group_attrs['label']
availability = group_attrs['availability']
......@@ -60,7 +60,7 @@ class SliceGrouper:
label_to_group[label] = group_name
self._label_to_group = label_to_group
def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]:
def _select_group(self, slice_obj : Slice) -> Optional[Tuple[str, float, float]]: # selects the group for a slice
with self._lock:
grouping_parameters = get_slice_grouping_parameters(slice_obj)
LOGGER.debug('[_select_group] grouping_parameters={:s}'.format(str(grouping_parameters)))
......@@ -78,16 +78,16 @@ class SliceGrouper:
return group_name, availability, capacity_gbps
@property
def is_enabled(self): return self._is_enabled
def is_enabled(self): return self._is_enabled # whether slice grouping is enabled
def group(self, slice_obj : Slice) -> bool:
def group(self, slice_obj : Slice) -> bool: # determines the group the slice should belong to and adds it
LOGGER.debug('[group] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
selected_group = self._select_group(slice_obj)
LOGGER.debug('[group] selected_group={:s}'.format(str(selected_group)))
if selected_group is None: return False
return add_slice_to_group(slice_obj, selected_group)
def ungroup(self, slice_obj : Slice) -> bool:
def ungroup(self, slice_obj : Slice) -> bool: # removes the slice from its group
LOGGER.debug('[ungroup] slice_obj={:s}'.format(grpc_message_to_json_string(slice_obj)))
selected_group = self._select_group(slice_obj)
LOGGER.debug('[ungroup] selected_group={:s}'.format(str(selected_group)))
......
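As a reference for the clustering logic above, a self-contained sketch of how K-Means maps a slice's SLA to one of the pre-defined groups (the group definitions here are illustrative and not the actual SLICE_GROUPS constant):

import pandas
from sklearn.cluster import KMeans

# Illustrative group definitions: (name, availability %, capacity in Gb/s)
groups = [('gold', 99.99, 100.0), ('silver', 99.9, 10.0), ('bronze', 99.0, 1.0)]
df_groups = pandas.DataFrame(groups, columns=['name', 'availability', 'capacity_gbps'])

# One cluster per group, fitted on the group definitions themselves
k_means = KMeans(n_clusters=df_groups.shape[0])
k_means.fit(df_groups[['availability', 'capacity_gbps']])
df_groups['label'] = k_means.predict(df_groups[['availability', 'capacity_gbps']])

# An incoming slice's (availability, capacity) pair gets the label of the closest group
sample = pandas.DataFrame([[99.5, 5.0]], columns=['availability', 'capacity_gbps'])
label = int(k_means.predict(sample)[0])
group_name = df_groups.loc[df_groups['label'] == label, 'name'].iloc[0]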
......@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import base64, json, logging #, re
import base64, json, logging
import traceback #, re
from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request
from common.proto.context_pb2 import ContextList, Empty, TopologyId, TopologyList
from common.tools.descriptor.Loader import DescriptorLoader, compose_notifications
......@@ -113,6 +114,7 @@ def home():
except Exception as e: # pylint: disable=broad-except
LOGGER.exception('Descriptor load failed')
flash(f'Descriptor load failed: `{str(e)}`', 'danger')
traceback.print_exc() # print the full traceback for debugging
finally:
context_client.close()
device_client.close()
......