Commit 82cf6e38 authored by Lluis Gifre Renom

Multiple pre-release fixes

parent e597be50
2 merge requests: !142 Release TeraFlowSDN 2.1, !140 Multiple pre-release fixes in OFC'22 and ECOC'22 tests and related components
@@ -37,7 +37,7 @@ spec:
           - containerPort: 9192
           env:
           - name: LOG_LEVEL
-            value: "INFO"
+            value: "DEBUG"
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:9090"]
......
@@ -36,7 +36,7 @@ spec:
           - containerPort: 9192
           env:
           - name: LOG_LEVEL
-            value: "INFO"
+            value: "DEBUG"
          readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:10020"]
......
@@ -36,7 +36,7 @@ spec:
           - containerPort: 9192
           env:
           - name: LOG_LEVEL
-            value: "INFO"
+            value: "DEBUG"
           readinessProbe:
             exec:
               command: ["/bin/grpc_health_probe", "-addr=:3030"]
......
@@ -36,7 +36,7 @@ spec:
           - containerPort: 9192
           env:
           - name: LOG_LEVEL
-            value: "INFO"
+            value: "DEBUG"
           - name: SLICE_GROUPING
             value: "DISABLE"
           envFrom:
......
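Note: the four manifest hunks above flip the same switch in four component Deployments, raising logging from INFO to DEBUG for the pre-release tests. A minimal sketch of how such a variable is typically consumed on the component side (an assumption for illustration; the helper below is not TFS code):

    import logging, os

    def get_log_level(default: str = 'INFO') -> int:
        # Illustrative: map the LOG_LEVEL env var set in the manifests
        # (e.g. value: "DEBUG") to a Python logging level constant.
        name = os.environ.get('LOG_LEVEL', default).upper()
        return getattr(logging, name, logging.INFO)

    logging.basicConfig(level=get_log_level())
    logging.getLogger(__name__).debug('visible only when LOG_LEVEL="DEBUG"')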
@@ -111,7 +111,7 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
             str_location_id = grpc_message_to_json_string(constraint.endpoint_location.location)
             location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id)
         num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()}
-        num_disjoint_paths = min(num_endpoints_per_location)
+        num_disjoint_paths = max(num_endpoints_per_location)
         update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0)
     return target
......
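The min-to-max fix matters when sites contribute different numbers of endpoints: per this change, the number of disjoint paths is taken from the location with the most candidate endpoints rather than the sparsest one. A worked sketch with hypothetical endpoint IDs:

    # Hypothetical data: two endpoints at site A, one at site B.
    location_endpoints = {
        'site-A': {'ep1', 'ep2'},
        'site-B': {'ep3'},
    }
    num_endpoints_per_location = {len(eps) for eps in location_endpoints.values()}  # {2, 1}

    # Before the fix: min({2, 1}) == 1, ignoring that site A can anchor 2 disjoint paths.
    # After the fix:  max({2, 1}) == 2.
    num_disjoint_paths = max(num_endpoints_per_location)
    assert num_disjoint_paths == 2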
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import json, logging, requests
+import json, logging, requests, uuid
 from typing import Dict, List, Optional, Tuple, Union
 from common.proto.context_pb2 import (
     Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum)
@@ -210,7 +210,7 @@ class _Algorithm:
         response_list = self.json_reply.get('response-list', [])
         reply = PathCompReply()
         grpc_services : Dict[Tuple[str, str], Service] = {}
-        grpc_connections : Dict[str, Connection] = {}
+        #grpc_connections : Dict[str, Connection] = {}
         for response in response_list:
             orig_service_id = response['serviceId']
             context_uuid = orig_service_id['contextId']
@@ -251,23 +251,23 @@ class _Algorithm:
         self.logger.debug('BASIC connections = {:s}'.format(str(connections)))
         for connection in connections:
-            connection_uuid,service_type,path_hops,_ = connection
-            service_key = (context_uuid, connection_uuid)
+            service_uuid,service_type,path_hops,_ = connection
+            service_key = (context_uuid, service_uuid)
             if service_key in grpc_services: continue
             grpc_service = self.add_service_to_reply(
-                reply, context_uuid, connection_uuid, service_type, path_hops=path_hops,
+                reply, context_uuid, service_uuid, service_type, path_hops=path_hops,
                 config_rules=orig_config_rules)
             grpc_services[service_key] = grpc_service
         for connection in connections:
-            connection_uuid,_,path_hops,dependencies = connection
-            service_key = (context_uuid, connection_uuid)
+            service_uuid,_,path_hops,dependencies = connection
+            service_key = (context_uuid, service_uuid)
             grpc_service = grpc_services.get(service_key)
             if grpc_service is None: raise Exception('Service({:s}) not found'.format(str(service_key)))
             #if connection_uuid in grpc_connections: continue
-            grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops)
+            grpc_connection = self.add_connection_to_reply(reply, str(uuid.uuid4()), grpc_service, path_hops)
             #grpc_connections[connection_uuid] = grpc_connection
             for sub_service_uuid in dependencies:
......
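The rename clarifies that the tuples in the PathComp response carry service UUIDs, while each connection now gets a fresh uuid4 identifier, so recomputing a path never reuses a connection ID. A minimal sketch of that distinction (plain dicts stand in for the Service/Connection protobufs; names are illustrative):

    import uuid
    from typing import Dict, Tuple

    grpc_services: Dict[Tuple[str, str], dict] = {}

    def build_reply(context_uuid: str, connections: list) -> list:
        reply = []
        for service_uuid, service_type, path_hops, _deps in connections:
            # Services are keyed and deduplicated by (context, service) UUID...
            service_key = (context_uuid, service_uuid)
            service = grpc_services.setdefault(
                service_key, {'uuid': service_uuid, 'type': service_type})
            # ...but every connection gets its own fresh identifier,
            # decoupled from the service UUID it belongs to.
            reply.append({'connection_uuid': str(uuid.uuid4()),
                          'service': service, 'path': path_hops})
        return reply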
@@ -85,10 +85,10 @@ def convert_explicit_path_hops_to_connections(
             LOGGER.debug('  create and terminate underlying connection')
             # create underlying connection
-            connection_uuid = str(uuid.uuid4())
+            sub_service_uuid = str(uuid.uuid4())
             prv_service_type = connection_stack.queue[-1][1]
             service_type = get_service_type(res_class[1], prv_service_type)
-            connection_stack.put((connection_uuid, service_type, [path_hop], []))
+            connection_stack.put((sub_service_uuid, service_type, [path_hop], []))
             # underlying connection ended
             connection = connection_stack.get()
@@ -102,10 +102,10 @@ def convert_explicit_path_hops_to_connections(
         elif prv_res_class[0] > res_class[0]:
             # create underlying connection
             LOGGER.debug('  create underlying connection')
-            connection_uuid = str(uuid.uuid4())
+            sub_service_uuid = str(uuid.uuid4())
             prv_service_type = connection_stack.queue[-1][1]
             service_type = get_service_type(res_class[1], prv_service_type)
-            connection_stack.put((connection_uuid, service_type, [path_hop], []))
+            connection_stack.put((sub_service_uuid, service_type, [path_hop], []))
         elif prv_res_class[0] == res_class[0]:
             # same resource group kind
             LOGGER.debug('  same resource group kind')
@@ -120,10 +120,10 @@ def convert_explicit_path_hops_to_connections(
             connections.append(connection)
             connection_stack.queue[-1][3].append(connection[0])
-            connection_uuid = str(uuid.uuid4())
+            sub_service_uuid = str(uuid.uuid4())
             prv_service_type = connection_stack.queue[-1][1]
             service_type = get_service_type(res_class[1], prv_service_type)
-            connection_stack.put((connection_uuid, service_type, [path_hop], []))
+            connection_stack.put((sub_service_uuid, service_type, [path_hop], []))
         elif prv_res_class[0] < res_class[0]:
             # underlying connection ended
             LOGGER.debug('  underlying connection ended')
......
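convert_explicit_path_hops_to_connections groups path hops into nested sub-services using a LIFO stack of (uuid, service_type, path_hops, dependencies) tuples; the rename makes explicit that each stack entry identifies a sub-service, not a connection. A reduced sketch of the push/pop pattern under that assumption (get_service_type and the resource-class comparisons are elided):

    import queue, uuid

    connection_stack: 'queue.LifoQueue' = queue.LifoQueue()
    connection_stack.put((str(uuid.uuid4()), 'ROOT', [], []))   # top-level service

    def open_sub_service(path_hop, service_type: str) -> None:
        # Each nested segment becomes a sub-service with its own UUID;
        # its hops and dependencies accumulate until the segment ends.
        sub_service_uuid = str(uuid.uuid4())
        connection_stack.put((sub_service_uuid, service_type, [path_hop], []))

    def close_sub_service(connections: list) -> None:
        connection = connection_stack.get()                   # finished sub-service
        connections.append(connection)
        connection_stack.queue[-1][3].append(connection[0])   # register as dependency of parent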
@@ -98,14 +98,27 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             context_client, request.service_id, rw_copy=False,
             include_config_rules=True, include_constraints=True, include_endpoint_ids=True)
         # Identify service constraints
+        num_disjoint_paths = None
+        is_diverse = False
         gps_location_aware = False
         for constraint in request.service_constraints:
-            if constraint.WhichOneof('constraint') != 'endpoint_location': continue
-            if constraint.endpoint_location.location.WhichOneof('location') != 'gps_position': continue
-            gps_location_aware = True
+            constraint_kind = constraint.WhichOneof('constraint')
+            if constraint_kind == 'sla_availability':
+                num_disjoint_paths = constraint.sla_availability.num_disjoint_paths
+            elif constraint_kind == 'custom':
+                if constraint.custom.constraint_type == 'diversity': is_diverse = True
+            elif constraint_kind == 'endpoint_location':
+                location = constraint.endpoint_location.location
+                if location.WhichOneof('location') == 'gps_position': gps_location_aware = True
+            else:
+                continue
+        LOGGER.debug('num_disjoint_paths={:s}'.format(str(num_disjoint_paths)))
+        LOGGER.debug('is_diverse={:s}'.format(str(is_diverse)))
         LOGGER.debug('gps_location_aware={:s}'.format(str(gps_location_aware)))
-        if _service is not None and gps_location_aware:
+        if _service is not None and num_disjoint_paths is None and not is_diverse and gps_location_aware:
             LOGGER.debug('  Removing previous service')
             tasks_scheduler = TasksScheduler(self.service_handler_factory)
             tasks_scheduler.compose_from_service(_service, is_delete=True)
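The rewritten scan classifies each constraint once via WhichOneof and records three facts in a single pass; the previous-service teardown then only fires for plain GPS-aware requests, not disjoint or diverse ones. A self-contained sketch of the same triage, with plain dicts standing in for the protobuf constraints ('kind' plays the role of WhichOneof('constraint')):

    constraints = [
        {'kind': 'sla_availability', 'num_disjoint_paths': 2},
        {'kind': 'custom', 'constraint_type': 'diversity'},
        {'kind': 'endpoint_location', 'location': 'gps_position'},
    ]

    num_disjoint_paths, is_diverse, gps_location_aware = None, False, False
    for constraint in constraints:
        kind = constraint['kind']
        if kind == 'sla_availability':
            num_disjoint_paths = constraint['num_disjoint_paths']
        elif kind == 'custom':
            is_diverse = is_diverse or (constraint['constraint_type'] == 'diversity')
        elif kind == 'endpoint_location':
            gps_location_aware = gps_location_aware or (constraint['location'] == 'gps_position')

    # Previous-service removal is now skipped for disjoint or diverse requests:
    remove_previous = (num_disjoint_paths is None) and (not is_diverse) and gps_location_aware
    assert (num_disjoint_paths, is_diverse, gps_location_aware) == (2, True, True)
    assert remove_previous is False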
@@ -140,6 +153,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             str_service_status = ServiceStatusEnum.Name(service_status.service_status)
             raise Exception(MSG.format(service_key, str_service_status))
         # Normal service
+        del service.service_endpoint_ids[:] # pylint: disable=no-member
         for endpoint_id in request.service_endpoint_ids:
             service.service_endpoint_ids.add().CopyFrom(endpoint_id) # pylint: disable=no-member
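Clearing the repeated field before copying prevents endpoint IDs from accumulating when an existing service is updated; `del repeated_field[:]` is the standard protobuf idiom for emptying a repeated field in place. Sketch with a list standing in for the protobuf field:

    # A list stands in for the protobuf repeated field; on real messages,
    # `del service.service_endpoint_ids[:]` empties the field in place.
    service_endpoint_ids = ['ep-old-1', 'ep-old-2']
    requested = ['ep-new-1', 'ep-new-2']

    del service_endpoint_ids[:]          # without this, old and new IDs would both remain
    service_endpoint_ids.extend(requested)
    assert service_endpoint_ids == ['ep-new-1', 'ep-new-2']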
@@ -192,12 +206,6 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             context_client, service_id_with_uuids, rw_copy=False,
             include_config_rules=True, include_constraints=True, include_endpoint_ids=True)
-        num_disjoint_paths = 0
-        for constraint in request.service_constraints:
-            if constraint.WhichOneof('constraint') == 'sla_availability':
-                num_disjoint_paths = constraint.sla_availability.num_disjoint_paths
-                break
         num_disjoint_paths = 1 if num_disjoint_paths is None or num_disjoint_paths == 0 else num_disjoint_paths
         num_expected_endpoints = num_disjoint_paths * 2
......
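With the constraint scan hoisted to the start of the handler (earlier hunk), the duplicate lookup is dropped here; only the normalization and the endpoint-count invariant remain: every disjoint path consumes one endpoint at each edge. A sketch of that arithmetic:

    def expected_endpoints(num_disjoint_paths) -> int:
        # Treat "unset" (None) and 0 as a single path; each path needs an
        # endpoint at both ends, hence the factor of 2.
        if num_disjoint_paths is None or num_disjoint_paths == 0:
            num_disjoint_paths = 1
        return num_disjoint_paths * 2

    assert expected_endpoints(None) == 2   # plain service: 2 endpoints
    assert expected_endpoints(2) == 4      # 2 disjoint paths: 4 endpoints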
@@ -20,7 +20,20 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device automation monitoring pathcomp service slice compute webui"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice compute webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
@@ -31,6 +44,12 @@ export TFS_K8S_NAMESPACE="tfs"
 # Set additional manifest files to be applied after the deployment
 export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
@@ -43,6 +62,12 @@ export TFS_SKIP_BUILD=""
 # Set the namespace where CockroachDB will be deployed.
 export CRDB_NAMESPACE="crdb"
+
+# Set the external port the CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
 # Set the database username to be used by Context.
 export CRDB_USERNAME="tfs"
@@ -57,7 +82,7 @@ export CRDB_DATABASE="tfs"
 export CRDB_DEPLOY_MODE="single"
 # Disable flag for dropping database, if it exists.
-export CRDB_DROP_DATABASE_IF_EXISTS=""
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
@@ -68,6 +93,12 @@ export CRDB_REDEPLOY=""
 # Set the namespace where NATS will be deployed.
 export NATS_NAMESPACE="nats"
+
+# Set the external port the NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port the NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
 # Disable flag for re-deploying NATS from scratch.
 export NATS_REDEPLOY=""
@@ -77,6 +108,15 @@ export NATS_REDEPLOY=""
 # Set the namespace where QuestDB will be deployed.
 export QDB_NAMESPACE="qdb"
+
+# Set the external port the QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port the QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port the QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
 # Set the database username to be used for QuestDB.
 export QDB_USERNAME="admin"
@@ -86,8 +126,20 @@ export QDB_PASSWORD="quest"
 # Set the table name to be used by Monitoring for KPIs.
 export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
 # Disable flag for dropping tables if they exist.
-export QDB_DROP_TABLES_IF_EXIST=""
+export QDB_DROP_TABLES_IF_EXIST="YES"
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port the Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port the Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
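The new *_EXT_PORT_* variables pin the host ports where each backing service is exposed. A quick hedged check, sketched in Python for consistency with the rest of the examples, that probes those ports after deployment (assumes the script's default values and that the services are reachable on localhost):

    import os, socket

    # Defaults mirror the values set above; override via the environment.
    PORTS = {
        'CockroachDB SQL':  int(os.environ.get('CRDB_EXT_PORT_SQL',   '26257')),
        'CockroachDB HTTP': int(os.environ.get('CRDB_EXT_PORT_HTTP',  '8081')),
        'NATS client':      int(os.environ.get('NATS_EXT_PORT_CLIENT', '4222')),
        'NATS HTTP':        int(os.environ.get('NATS_EXT_PORT_HTTP',  '8222')),
        'QuestDB SQL':      int(os.environ.get('QDB_EXT_PORT_SQL',    '8812')),
        'QuestDB ILP':      int(os.environ.get('QDB_EXT_PORT_ILP',    '9009')),
        'QuestDB HTTP':     int(os.environ.get('QDB_EXT_PORT_HTTP',   '9000')),
        'Prometheus HTTP':  int(os.environ.get('PROM_EXT_PORT_HTTP',  '9090')),
        'Grafana HTTP':     int(os.environ.get('GRAF_EXT_PORT_HTTP',  '3000')),
    }

    for name, port in PORTS.items():
        with socket.socket() as sock:
            sock.settimeout(1.0)
            status = 'open' if sock.connect_ex(('localhost', port)) == 0 else 'closed'
        print(f'{name:17s} port {port}: {status}')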