Skip to content
Snippets Groups Projects
Commit 53ba0ee6 authored by Lluis Gifre Renom's avatar Lluis Gifre Renom
Browse files

Pre-merge code cleanup

parent dcf4e16f
No related branches found
No related tags found
2 merge requests!294Release TeraFlowSDN 4.0,!257Resolve "Create QoSProfile component"
......@@ -171,9 +171,6 @@ function crdb_drop_database_single() {
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE};"
kubectl exec -i --namespace ${CRDB_NAMESPACE} cockroachdb-0 -- \
./cockroach sql --certs-dir=/cockroach/cockroach-certs --url=${CRDB_CLIENT_URL} \
--execute "DROP DATABASE IF EXISTS ${CRDB_DATABASE_QOSPROFILE};"
echo
}
......
......@@ -163,7 +163,6 @@ kubectl create secret generic kfk-kpi-data --namespace ${TFS_K8S_NAMESPACE} --ty
--from-literal=KFK_SERVER_PORT=${KFK_SERVER_PORT}
printf "\n"
echo "Create secret with NATS data"
NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service ${NATS_NAMESPACE} -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
if [ -z "$NATS_CLIENT_PORT" ]; then
......
......@@ -28,6 +28,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_gene
# Uncomment to activate Monitoring Framework (new)
#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation"
# Uncomment to activate QoS Profiles
#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile"
# Uncomment to activate BGP-LS Speaker
#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker"
......
......@@ -88,4 +88,4 @@ class QoSProfileClient:
LOGGER.debug('GetConstraintListFromQoSProfile request: {:s}'.format(grpc_message_to_json_string(request)))
response = self.stub.GetConstraintListFromQoSProfile(request)
LOGGER.debug('GetConstraintListFromQoSProfile result: {:s}'.format(grpc_message_to_json_string(response)))
return response
\ No newline at end of file
return response
......@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
psycopg2-binary==2.9.*
SQLAlchemy==1.4.*
sqlalchemy-cockroachdb==1.4.*
SQLAlchemy-Utils==0.38.*
\ No newline at end of file
SQLAlchemy-Utils==0.38.*
......@@ -12,47 +12,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging, signal, sys, threading
from prometheus_client import start_http_server
from common.Constants import ServiceNameEnum
from common.Settings import (
ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
wait_for_environment_variables
)
from common.Settings import get_log_level, get_metrics_port
from common.tools.database.GenericDatabase import Database
from qos_profile.service.database.models.QoSProfile import QoSProfileModel
from .QoSProfileService import QoSProfileService
from .database.models.QoSProfile import QoSProfileModel
LOG_LEVEL = get_log_level()
logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
LOGGER = logging.getLogger(__name__)
terminate = threading.Event()
LOGGER : logging.Logger = None
def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
def signal_handler(signal, frame): # pylint: disable=redefined-outer-name,unused-argument
    """Signal handler for SIGINT/SIGTERM: request a graceful shutdown.

    Logs a warning and sets the module-level `terminate` event so the main
    loop can stop; the `frame` argument is required by the signal protocol
    but unused here.
    """
    LOGGER.warning('Terminate signal received')
    terminate.set()
def main():
global LOGGER # pylint: disable=global-statement
log_level = get_log_level()
logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
LOGGER = logging.getLogger(__name__)
wait_for_environment_variables([
get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ),
get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
])
LOGGER.info('Starting...')
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
LOGGER.info('Starting...')
# Start metrics server
metrics_port = get_metrics_port()
start_http_server(metrics_port)
db_manager = Database(db_name=os.getenv('CRDB_DATABASE'), model=QoSProfileModel)
# Get Database Engine instance and initialize database, if needed
db_manager = Database(QoSProfileModel)
try:
db_manager.create_database()
......@@ -61,7 +49,7 @@ def main():
LOGGER.exception('Failed to check/create the database: {:s}'.format(str(db_manager.db_engine.url)))
raise e
# Starting service service
# Starting service
grpc_service = QoSProfileService(db_manager.db_engine)
grpc_service.start()
......
......@@ -13,62 +13,10 @@
# limitations under the License.
import sqlalchemy
from typing import Any, List
from sqlalchemy.orm import Session, sessionmaker, declarative_base
from sqlalchemy.sql import text
from sqlalchemy_cockroachdb import run_transaction
from sqlalchemy.orm import declarative_base
_Base = declarative_base()
def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None:
    """Create covering secondary indexes to speed up frequent read queries.

    Issues `CREATE INDEX IF NOT EXISTS ... STORING (...)` statements — a
    CockroachDB-specific form where the STORING clause keeps extra columns in
    the index so lookups by the indexed foreign-key column avoid touching the
    primary index. All statements run inside a single retry-aware transaction
    via sqlalchemy_cockroachdb.run_transaction.

    :param db_engine: engine bound to the target (CockroachDB) database.
    """
    def index_storing(
        index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str]
    ) -> Any:
        # Build one CREATE INDEX ... STORING statement as a SQLAlchemy text()
        # clause; identifiers are double-quoted to preserve case/reserved words.
        str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields])
        str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields])
        INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});'
        return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields))
    # One covering index per hot lookup path: child tables indexed by their
    # parent UUID, storing the columns the read queries project.
    statements = [
        index_storing('device_configrule_device_uuid_rec_idx', 'device_configrule', ['device_uuid'], [
            'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
        ]),
        index_storing('service_configrule_service_uuid_rec_idx', 'service_configrule', ['service_uuid'], [
            'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
        ]),
        index_storing('slice_configrule_slice_uuid_rec_idx', 'slice_configrule', ['slice_uuid'], [
            'position', 'kind', 'action', 'data', 'created_at', 'updated_at'
        ]),
        index_storing('connection_service_uuid_rec_idx', 'connection', ['service_uuid'], [
            'settings', 'created_at', 'updated_at'
        ]),
        index_storing('service_constraint_service_uuid_rec_idx', 'service_constraint', ['service_uuid'], [
            'position', 'kind', 'data', 'created_at', 'updated_at'
        ]),
        index_storing('slice_constraint_slice_uuid_rec_idx', 'slice_constraint', ['slice_uuid'], [
            'position', 'kind', 'data', 'created_at', 'updated_at'
        ]),
        index_storing('endpoint_device_uuid_rec_idx', 'endpoint', ['device_uuid'], [
            'topology_uuid', 'name', 'endpoint_type', 'kpi_sample_types', 'created_at', 'updated_at'
        ]),
        # NOTE(review): stored columns here (service_name/service_type/...) look
        # copied from the 'service' table — confirm they exist on 'qos_profile'.
        index_storing('qos_profile_context_uuid_rec_idx', 'qos_profile', ['context_uuid'], [
            'service_name', 'service_type', 'service_status', 'created_at', 'updated_at'
        ]),
        index_storing('slice_context_uuid_rec_idx', 'slice', ['context_uuid'], [
            'slice_name', 'slice_status', 'slice_owner_uuid', 'slice_owner_string', 'created_at', 'updated_at'
        ]),
        index_storing('topology_context_uuid_rec_idx', 'topology', ['context_uuid'], [
            'topology_name', 'created_at', 'updated_at'
        ]),
        index_storing('device_component_idx', 'device_component', ['device_uuid'], [
            'name', 'type', 'attributes', 'created_at', 'updated_at'
        ]),
    ]
    def callback(session : Session) -> bool:
        # Execute every index statement within the single managed transaction.
        for stmt in statements: session.execute(stmt)
    # run_transaction handles CockroachDB's transaction-retry protocol.
    run_transaction(sessionmaker(bind=db_engine), callback)
def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
    """Create all tables registered on the declarative base.

    When `drop_if_exists` is True, existing tables are dropped first so the
    schema is rebuilt from scratch; otherwise only missing tables are created.

    :param db_engine: engine bound to the target database.
    :param drop_if_exists: drop the current tables before recreating them.
    """
    if drop_if_exists:
        _Base.metadata.drop_all(db_engine)
    _Base.metadata.create_all(db_engine)
    # create_performance_enhancers(db_engine)  # intentionally left disabled
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment