diff --git a/deploy/all.sh b/deploy/all.sh
index 332eb6ac65b62e21d0ce2276e7639fdcfde008d4..5a1f081fff2033a6a71217bd7230a48e7506ae5b 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
 # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"alto context device ztp monitoring pathcomp service slice nbi webui load_generator"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -155,8 +155,6 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
 
 # If not already set, set the external port Grafana HTTP Dashboards will be exposed to.
 export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 
-#If not already set, set the external port ALTO will be exposed to.
-export ALTO_EXT_PORT_HTTP=${ALTO_EXT_PORT_HTTP:-"5000"}
 
 ########################################################################################################################
diff --git a/deploy/expose_dashboard.sh b/deploy/expose_dashboard.sh
index 7180c4ca5c1147c27bd5a6a8f7aded9b2fcbd24e..f2391ab5d5b028bed11f23a2f95c06054d1f0cbc 100755
--- a/deploy/expose_dashboard.sh
+++ b/deploy/expose_dashboard.sh
@@ -24,8 +24,6 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
 
 # If not already set, set the external port Grafana HTTP Dashboards will be exposed to.
 export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 
-# If not already set, set the external port ALTO HTTP Dashboards will be exposed to.
-# export ALTO_EXT_PORT_HTTP=${ALTO_EXT_PORT_HTTP:-"5000"}
 
 ########################################################################################################################
 # Automated steps start here
@@ -57,19 +55,6 @@ function expose_dashboard() {
     PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
     kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
     echo
-
-    # echo "ALTO Port Mapping"
-    # echo ">>> Expose ALTO HTTP Mgmt GUI port (5000->${ALTO_EXT_PORT_HTTP})"
-    # ALTO_PORT_HTTP=$(kubectl --namespace ${MONITORING_NAMESPACE} get service alto -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
-    # PATCH='{"data": {"'${ALTO_EXT_PORT_HTTP}'": "'${MONITORING_NAMESPACE}'/alto:'${ALTO_PORT_HTTP}'"}}'
-    # kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
-
-    # PORT_MAP='{"containerPort": '${ALTO_EXT_PORT_HTTP}', "hostPort": '${ALTO_EXT_PORT_HTTP}'}'
-    # CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
-    # PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
-    # kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
-    # echo
-
 }
 
 if kubectl get namespace ${MONITORING_NAMESPACE} &> /dev/null; then
diff --git a/deploy/tfs.sh b/deploy/tfs.sh
index e93cb35442bca4aa555d0589df4fc90391d9376a..3fdbe77fb502c42aaf7dd507ab239f6b3bb20056 100755
--- a/deploy/tfs.sh
+++ b/deploy/tfs.sh
@@ -27,7 +27,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
 
 # If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
 # By default, only basic components are deployed
-export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator alto"}
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device ztp monitoring pathcomp service slice nbi webui load_generator"}
 
 # If not already set, set the tag you want to use for your images.
 export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
@@ -114,8 +114,6 @@ export PROM_EXT_PORT_HTTP=${PROM_EXT_PORT_HTTP:-"9090"}
 
 # If not already set, set the external port Grafana HTTP Dashboards will be exposed to.
 export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 
-# If not already set, set the external port ALTO HTTP Dashboards will be exposed to.
-# export ALTO_EXT_PORT_HTTP=${ALTO_EXT_PORT_HTTP:-"5000"}
 
 ########################################################################################################################
 # Automated steps start here
diff --git a/manifests/altoservice.yaml b/manifests/altoservice.yaml
deleted file mode 100644
index 34f645a948350e47dd9dd1571bf7a402fd8906e4..0000000000000000000000000000000000000000
--- a/manifests/altoservice.yaml
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: altoservice
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: altoservice
-  template:
-    metadata:
-      labels:
-        app: altoservice
-    spec:
-      containers:
-        - name: altoservice
-          image: localhost:32000/tfs/alto:dev  # alto_light:latest
-          ports:
-            - containerPort: 5000
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: altoserviceforwarding
-spec:
-  selector:
-    app: altoservice
-  type: ClusterIP
-  ports:
-    - protocol: TCP
-      name: "maps"
-      port: 5000
-      targetPort: 5000
-
-
-  # type: LoadBalancer
-
-
-
-# apiVersion: apps/v1
-# kind: Deployment
-# metadata:
-#   name: altoservice
-# spec:
-#   selector:
-#     matchLabels:
-#       app: altoservice
-#   # replicas: 1
-#   template:
-#     metadata:
-#       labels:
-#         app: altoservice
-#     spec:
-#       terminationGracePeriodSeconds: 5
-#       containers:
-#         - name: server
-#           image: alto_light:latest  # localhost:32000/tfs/alto
-#           ports:
-#             - containerPort: 5000
-#             - containerPort: 10050
-#             - containerPort: 9192
-#           env:
-#             - name: LOG_LEVEL
-#               value: "INFO"
-#           # readinessProbe:
-#           #   exec:
-#           #     command: ["/bin/grpc_health_probe", "-addr=:10050"]
-#           # livenessProbe:
-#           #   exec:
-#           #     command: ["/bin/grpc_health_probe", "-addr=:10050"]
-#           resources:
-#             requests:
-#               cpu: 50m
-#               memory: 64Mi
-#             limits:
-#               cpu: 500m
-#               memory: 512Mi
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: altoservice
-#   labels:
-#     app: altoservice
-# spec:
-#   type: ClusterIP
-#   selector:
-#     app: altoservice
-#   ports:
-#     - name: http
-#       protocol: TCP
-#       port: 5000
-#       targetPort: 5000
-#     - name: grpc
-#       protocol: TCP
-#       port: 10050
-#       targetPort: 10050
-#     - name: metrics
-#       protocol: TCP
-#       port: 9192
-#       targetPort: 9192
-
-
-# version: '3.1'
-
-# services:
-#   docker_alto:
-#     image: docker_alto
-#     container_name: docker_alto
-#     ports:
-#       - "5000:5000"
-#     restart: unless-stopped
diff --git a/src/alto/00-alto-light.yaml b/src/alto/00-alto-light.yaml
deleted file mode 100644
index 69404be12e4929e50320579ed3c6f7eebaf7ffc4..0000000000000000000000000000000000000000
--- a/src/alto/00-alto-light.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: alto-light
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: alto-light
-  template:
-    metadata:
-      labels:
-        app: alto-light
-    spec:
-      containers:
-        - name: alto-light
-          image: docker.io/library/alto_light:latest
-          imagePullPolicy: IfNotPresent
-          ports:
-            - containerPort: 5000
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: alto-light
-spec:
-  selector:
-    app: alto-light
-  ports:
-    - protocol: TCP
-      port: 5000
-      targetPort: 5000
-  type: LoadBalancer
diff --git a/src/alto/Dockerfile b/src/alto/Dockerfile
deleted file mode 100644
index 1764e6d6110b5c5aa92134b69638da0d4d0e4973..0000000000000000000000000000000000000000
--- a/src/alto/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Use a base image with Python installed
-FROM python:3.7-alpine
-
-# Download the gRPC health probe
-#RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
-#    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
-#    chmod +x /bin/grpc_health_probe
-
-# Copy the application source code into the container
-COPY /src/alto/service/ /altoservice/
-#COPY service/ /altoservice/
-
-# Set the working directory to /altoservice
-WORKDIR /altoservice
-
-# Install the dependencies
-RUN pip3 install -r requirements.txt
-
-# Run the Python code
-CMD [ "python3", "alto_core.py" ]
diff --git a/src/alto/alto_light.tar b/src/alto/alto_light.tar
deleted file mode 100644
index 414f0d835cdae82a8d8c92d92170a5fe1ee70bd0..0000000000000000000000000000000000000000
Binary files a/src/alto/alto_light.tar and /dev/null differ
diff --git a/src/alto/docker-compose.yaml b/src/alto/docker-compose.yaml
deleted file mode 100644
index 5046ec12417a185ae22cc93f6cefff4c68bad9de..0000000000000000000000000000000000000000
--- a/src/alto/docker-compose.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-version: '3.1'
-
-services:
-  docker_alto:
-    image: alto_light:latest
-    container_name: alto_light
-    ports:
-      - "5000:5000"
diff --git a/src/alto/service/README.md b/src/alto/service/README.md
deleted file mode 100644
index 97467212bb2dd9575229ba2ada08e66c604e0930..0000000000000000000000000000000000000000
--- a/src/alto/service/README.md
+++ /dev/null
@@ -1,153 +0,0 @@
-
-
-
-
-## Table of contents
-1. [Overview](#overview)
-2. [File list](#file-list)
-3. [File hierarchy](#file-hierarchy)
-4. [Versions](#versions)
-5. [ToDo list](#todo-list)
-
-
-### Overview
-Directory created to review the code and see whether we can adapt it to a context in which it synchronizes with a Kafka queue.
-Ideally, this system would be distributed and fault-tolerant.
-
-The main goal of this directory is to tinker with the code without breaking anything our colleagues have done previously, by working on a copy.
-
-We must also evaluate what we have to implement in order to get a fully functional ALTO server.
-Review how to port to Python the work that Contreras, Rafa and I have discussed.
-
-### File list
-
-* exponsure.py: Provides an HTTP API to access the services defined in RFC 7285.
-* topology_maps_creator.py: Launches the available ALTO modules and receives the topology information through an interface enabled for that purpose. If requested at launch time, it also creates a Kafka queue where it publishes the network topology information (see the sketch below).
-* yang_alto.py: Standardizes the output of the information following a YANG schema and in JSON format.
-* kafka_ale:
-    * launcher: Shell script that starts both the Zookeeper environment and the Kafka queue.
-    * kafka_api.py: Provides a Python API to work with the defined queue.
-    * remaining folders and files: management files for Kafka and Zookeeper.
-* Modulos:
-    * alto_module.py: Abstract class that defines the behavior of the different ALTO modules. The goal is to have a common API so that all modules share the same base and the same main functions to export the received information.
-    * topology_bgp.py: ALTO module that processes the information received via BGP.
-    * topology_ietf.py: ALTO module that processes the information received from the PCE.
-* ../bgp/manage_bgp_speaker.py: Speaker for the BGP protocol. It runs the exabgp process to gather network information. It has not been modified, but we must keep it in mind since we depend on it.
-* realizar_git: git repository update script. I have to modify it to point at Telefónica's official git.
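Editor's note: for context on the removed kafka_api.py mentioned in the file list above, the publishing side reduces to a few lines. A minimal sketch, assuming the kafka-python package and the `alto-costes` topic used elsewhere in the deleted code; `AltoProducer` and `envio_alto` are the names from the removed sources, but the body shown here is an illustrative reconstruction, not the original implementation:

```python
# Hypothetical reconstruction of the removed kafka_api.py publishing helper.
import json
from kafka import KafkaProducer  # assumes the kafka-python package

class AltoProducer:
    def __init__(self, host, port):
        # Serialize every published value as UTF-8 JSON.
        self.producer = KafkaProducer(
            bootstrap_servers='%s:%s' % (host, port),
            value_serializer=lambda v: json.dumps(v).encode('utf-8'))

    def envio_alto(self, topic, data, partition):
        # Publish one topology/cost-map snapshot and block until it is sent.
        self.producer.send(topic, value=data, partition=partition)
        self.producer.flush()

# Usage, mirroring the calls in the deleted topology generators:
# kafka_p = AltoProducer("localhost", "9092")
# kafka_p.envio_alto('alto-costes', cost_map, 0)
```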
-
-Deprecated files (/desuso/):
-* api_pybatfish.py: Attempt at integrating with Batfish. It was cancelled once we saw that we gained no advantage from this integration. It has not been deleted, since the idea could still be reused later on.
-* topology_maps_creator_isis.py: Similar to topology_maps_creator.py but using IS-IS as E-BGP. It was set aside in the early versions (it never reached the branching phase) because it did not export the network topology correctly. With the current modular design it could serve as the seed of a new module, but we have not yet evaluated how to do this or whether it would pay off.
-* launcher_batfish: pybatfish activation script. Same situation as api_pybatfish.py.
-* modulos/topology_maps_generator.py.bk: Backup of the last unified version of the original generator.
-
-
-
-### File hierarchy
-
-+ exponsure --> ALTO network exposure API.
-    + alto_generator --> exposes: implemented RFC 7285 functions.
-    + topologia_ietf: exposes: network graph.
-        + ?
-    + topologia_bgp --> exposes: network graph.
-        + bgp.manager_bgp_speaker --> exposes: periodic info about BGP updates.
-    + yang_alto --> exposes: data formatting to YANG JSON.
-    (+) kafka_api --> exposes: the ability to send the received info to a Kafka queue.
-+ launcher --> Enables the kafka_api option.
-
-
-
-### Versions
-
-v3.0 (in progress)
-Includes the functionality of version 2, applying it also when the source is IETF instead of BGP.
-Missing:
-    - Finish the definition of properties.
-    - Once everything is correct, extract both the BGP reader and the IETF reader into two separate files.
-    - Modify the auxiliary functions so that they can be used with both topology types.
-
-
-v2.1
-Added all the services offered by ALTO in RFC 7285:
-* Map-Filtering Service: Summarizes the cost map or PID map based on a given parameter.
-* Endpoint Property Service: Returns a JSON document with the properties of the requested node.
-* Endpoint Cost Service: Returns the cost map of the requested endpoint.
-* Map Service: Default service. Returns the two maps that have been generated so far.
-
-- Changed the network-map format so that it reports not only the IPs but also the IP type, as specified in RFC 7285.
-- Created a file that acts as a JSON-YANG encoder. Missing:
-    - Cases that are not implemented yet are not formatted either (out of laziness; it could have been done).
-    - Test with more than one prefix per PID in the network map.
-    - Keep reviewing the RFC requirements.
-
-v2.0
-We split the project in two:
-- topology_maps_generator_http.py: Oriented to a client/server service. The aim is to expose functions accessible from the outside.
-- topology_maps_generator_kafka.py: Oriented to one-way exposure.
-
-To simplify development, everything can be done on the same branch by adding an argument that selects which version to launch.
-This version runs in parallel with v1.2, so it will incorporate those changes.
-
-    ***
-    We need to create functions to expose the services we provide:
-        - Multimaps (DONE)
-        - EncryptedCosts (?)
-        - Echo (for debugging) (DONE)
-    One thread per HTTP connection (?) --> We have to review the documentation: how to create the service, how to expose it (in the clear or over SSL), ... (DONE: initial version)
-    Important --> List of pending items
-    ***
-Exposed services: multipath, costs, pids, best path
-
-
-FOR FUTURE VERSIONS: Possibility of returning a PDF of a network graph.
-
-v1.2 (in development)
-Review the feasibility of using Batfish.
-Include the option of showing all the disjoint paths between two points of the delimited network. (done)
-Make the disjoint-paths option queryable from the outside. Problem: the Kafka queue is one-way (two options: create a branch without Kafka queues, or use another exposure method).
-
-
-v1.1
-We hash the PID of the nodes with a 384-bit SHA-3 hash, using a timestamp as salt. To avoid very long names we keep only the 32 most significant characters.
-(Since the intention is to mask the IP, no security is lost by reducing the number of bits shown; a condensed sketch of this hashing is shown after the ToDo list below.)
-In parallel, Fer has been adding weighted connections.
-
-
-v1.0
-We installed a Kafka queue that can be started with ./kafka_ale/launcher. Dependencies: Zookeeper.
-We created a Python file that serves as an API to work with the Kafka queue.
-Initial version: 2 queues (mapa_costes and mapa_pids).
-
-Still to be defined in phase 2:
-- The queue should only store the 2-3 most recent records (more are not needed, since records are not consumed when accessed).
-- Accessible over an SSL connection.
-- Possibility of distributed queues.
-- Failure-management script.
-
-In addition, we replaced the simple directed graph with an undirected multigraph (it allows more than one link between two nodes).
-
-
-v0.1
-We modified the code of topology_maps_generator.py and topology_maps_generator_isis.py so that, if a node goes down, they do not raise an exception but instead remove the links that have disappeared.
-The basic functionality is correct with OSPF, but with IS-IS the requests are not propagated to non-adjacent routers.
-
-
-### ToDo List
-
-***
-
-1. Continuous review process of the defined services.
-2. Implement the service over SSL instead of HTTP.
-3. Polish the exposure through Kafka.
-4. Evaluate infrastructure ALTO with LLDP.
-5. Parse all responses into YANG format.
-6. Finish the integration with the IETF information.
-7. Implement modular and parallelized reception of the information.
-8. Find out how to merge all the information without creating a Frankenstein.
-9. Implement multi-cost export.
-
-***
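Editor's note: the v1.1 PID hashing described above survives as `obtain_pid()` in the deleted alto_core.py further down this diff. Condensed to its essentials (with `DEFAULT_ASN = 0` as in the removed sources), it looks like this:

```python
# Condensed sketch of the v1.1 PID hashing from the removed alto_core.py.
import hashlib
from datetime import datetime

DEFAULT_ASN = 0

def hashed_pid(router_ip: str) -> str:
    # Microsecond timestamp used as salt; the original caches it per router
    # so that the same router always maps to the same PID.
    tsn = int(datetime.timestamp(datetime.now()) * 1000000)
    digest = hashlib.sha3_384((router_ip + str(tsn)).encode()).hexdigest()
    # Keep only the 32 most significant hex characters to shorten the name.
    return 'pid%d:%s:%d' % (DEFAULT_ASN, digest[:32], tsn)

# Example: hashed_pid("10.10.10.1") -> "pid0:<32 hex chars>:<timestamp>"
```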
-
-
-
-
-
diff --git a/src/alto/service/__init__.py b/src/alto/service/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/alto/service/alto_core.py b/src/alto/service/alto_core.py
deleted file mode 100644
index c8f69fa9075d4d5da662bc6c693df08bc6862d6e..0000000000000000000000000000000000000000
--- a/src/alto/service/alto_core.py
+++ /dev/null
@@ -1,454 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import json
-import re
-import networkx
-import socket
-import struct
-import hashlib
-import threading
-
-from time import sleep
-from datetime import datetime
-#sys.path.append('/home/ubuntu/docker-alto/network_exposure/')
-#from bgp.manage_bgp_speaker import ManageBGPSpeaker
-#sys.path.append('alto-ale/')
-#from kafka_ale.kafka_api import AltoProducer
-#from api_pybatfish import BatfishManager
-from yang_alto import RespuestasAlto
-from ipaddress import ip_address, IPv4Address
-#from modulos.topology_bgp import TopologyBGP
-from modulos.topology_ietf import TopologyIetf
-from api.web.alto_http import AltoHttp
-
-DEFAULT_ASN = 0
-RR_BGP_0 = "50.50.50.1"
-#RR_BGP = BGP_INFO['bgp']['ip']
-
-
-class TopologyCreator:
-
-    def __init__(self, modules, mode):
-        self.d_modules = modules
-        #self.exabgp_process = exabgp_process
-        self.props = {}
-        self.pids = {}
-        self.topology = networkx.Graph()
-        self.cost_map = {}
-        self.router_ids = []
-        # set path where to write result json files
-        self.topology_writer = TopologyFileWriter('/root/')
-        #if mode:
-        #    self.kafka_p = AltoProducer("localhost", "9092")
-        self.__http = AltoHttp(self)
-        self.h_thread = threading.Thread(target=self.__http.run)
-        #self.kafka_p = AltoProducer("localhost", "9093")
-        self.ts = {}
-        #self.bfm = BatfishManager()
-        self.vtag = 0
-        self.resp = RespuestasAlto()
-
-    ### Static Methods
-
-    @staticmethod
-    def discard_message_from_protocol_id(message, discard_protocols):
-        """Discard message if protocol is inside discard_protocols list"""
-        return message["protocol-id"] in discard_protocols
-
-    @staticmethod
-    def get_hex_id(ip):
-        """Get hexadecimal value for certain IP
-        :param: ip string"""
-        return ''.join(['%02x' % int(w) for w in ip.split('.')])
-
-    @staticmethod
-    def check_is_hex(hex_value):
-        try:
-            int(hex_value, 16)
-            return True
-        except ValueError:
-            return False
-
-    @staticmethod
-    def split_router_ids(router_id: str):
-        """Some router ids come without IP format, i.e. without dots in them;
-        convert these router_ids to IPs."""
-        router_id = str(router_id)
-        if '.' in router_id:
-            return router_id
-        router_groups = re.findall('...', router_id)
-        no_zero_groups = []
-        for group in router_groups:
-            if group.startswith('00'):
-                no_zero_groups.append(group[2:])
-            elif group.startswith('0'):
-                no_zero_groups.append(group[1:])
-            else:
-                no_zero_groups.append(group)
-        return '.'.join(no_zero_groups)
-
-    @staticmethod
-    def check_if_router_id_is_hex(router_id):
-        return router_id.isnumeric()
-
-    @staticmethod
-    def hex_to_ip(hex_ip):
-        hex_ip = hex_ip.strip("0")
-        addr_long = int(hex_ip, 16) & 0xFFFFFFFF
-        struct.pack("<L", addr_long)
-        return socket.inet_ntoa(struct.pack("<L", addr_long))
-
-    @staticmethod
-    def reverse_ip(reversed_ip):
-        l = reversed_ip.split(".")
-        return '.'.join(l[::-1])
-
-    def all_maps(self, topo, src, dst):
-        '''
-        Returns all the different paths between src and dst without any edge in common.
-        The result is a list of paths (each path is represented as a char list, e.g. ['a', 'c', 'd'])
-        Args:
-            topo: Topology map
-            src: node used as source
-            dst: node used as destination
-        '''
-        map_aux = networkx.Graph(topo)
-        all_paths = []
-
-        sh_path = networkx.dijkstra_path(map_aux, src, dst)
-        while sh_path != []:
-            cost = 0
-            nodo_s = sh_path[0]
-            for nodo_d in sh_path[1:]:
-                map_aux.remove_edge(nodo_s, nodo_d)
-                nodo_s = nodo_d
-                cost = cost + 1
-
-            all_paths.append({'path':sh_path, 'cost':cost})
-            try:
-                sh_path = networkx.dijkstra_path(map_aux, src, dst)
-            except networkx.exception.NetworkXNoPath as e:
-                sh_path = []
-        return all_paths
-
-
-
-
-    ### Auxiliary methods
-
-    def ip_type(self, prefix):
-        ip=prefix.split("/")[0]
-        return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6"
-
-    def obtain_pid(self, router):
-        """Returns the hashed PID of the router passed as argument.
-        If the PID was already mapped, it uses a dictionary to access it.
-        """
-        tsn = int(datetime.timestamp(datetime.now())*1000000)
-        rid = self.get_hex_id(router) if not self.check_is_hex(router) else router
-        if rid not in self.ts.keys():
-            self.ts[rid] = tsn
-        else:
-            tsn = self.ts[rid]
-        hash_r = hashlib.sha3_384((router + str(tsn)).encode())
-        return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn))
-
-    def create_pid_name(self, lsa, descriptors, area_id):
-        """Creates partition ID
-        with AS number + domain_id + area_id + hexadecimal router_id.
-        """
-        routers_id = []
-        desc = lsa[descriptors]
-        for item in desc:
-            if "router-id" in item:
-                routers_id.append(item["router-id"])
-        autonomous_systems = [item.get("autonomous-system") for item in desc]
-        domain_ids = [item.get("domain-id", 0) for item in desc]
-        for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids):
-            pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id)
-            #pid_name = self.obtain_pid(router_id)
-            origin = (autonomous_system, domain_id, area_id, router_id)
-            if pid_name not in self.props:
-                self.props[pid_name] = []
-            self.props[pid_name].append(origin)
-
-    def _get_router_id_from_node_descript_list(self, node_descriptors, key: str):
-        result = []
-        for descriptor in node_descriptors:
-            for key_d, value in descriptor.items():
-                if key_d == key:
-                    #print(value, key_d)
-                    if self.check_if_router_id_is_hex(value):
-                        result.append(self.split_router_ids(value))
-                    elif "." in value:
-                        result.append(value)
-                    else:
-                        result.append(self.reverse_ip(self.hex_to_ip(value)))
-        return result
-
-    def parseo_yang(self, mensaje, tipo):
-        return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}'
-
-
-
-    ### Topology generation and information gathering functions
-
-    def load_topology(self, lsa, igp_metric):
-        if lsa.get('ls-nlri-type') == 'bgpls-link':
-            # Link information
-            src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id')
-            dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id')
-            for i, j in zip(src, dst):
-                self.topology.add_edge(i, j, weight=igp_metric)
-        if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4':
-            # ToDo verify if prefix info is needed and not already provided by node-descriptors
-            # Node information. Groups origin with its prefixes
-            origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id")
-            prefix = self.split_router_ids(lsa['ip-reach-prefix'])
-            for item in origin:
-                if item not in self.topology.nodes():
-                    self.topology.add_node(item)
-                if 'prefixes' not in self.topology.nodes[item]:
-                    self.topology.nodes[item]['prefixes'] = []
-                self.topology.nodes[item]['prefixes'].append(prefix)
-        if lsa.get('ls-nlri-type') == "bgpls-node":
-            # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4
-            # add node to topology if not present
-            node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id')
-            self.router_ids.append(node_descriptors)
-            for node_descriptor in node_descriptors:
-                if node_descriptor not in self.topology.nodes():
-                    self.topology.add_node(node_descriptor)
-
-    def load_pid_prop(self, lsa, ls_area_id):
-        if 'node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id)
-        if 'local-node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id)
-        if 'remote-node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id)
-
-    def load_pids(self, ipv4db):
-        # self.pids stores the result of networkmap
-        for rr_bgp in [RR_BGP_0]:
-            for prefix, data in ipv4db[rr_bgp]['ipv4'].items():
-                pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop']))
-                #pid_name = self.obtain_pid(data['next-hop'])
-                tipo=self.ip_type(prefix)
-                if pid_name not in self.pids:
-                    self.pids[pid_name] = {}
-                if tipo not in self.pids[pid_name]:
-                    self.pids[pid_name][tipo]=[]
-                if prefix not in self.pids[pid_name][tipo]:
-                    self.pids[pid_name][tipo].append(prefix)
-
-    def compute_costmap(self):
-        # shortest_paths is a dict by source and target that contains the shortest path length for
-        # that source and destination
-        shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology))
-        for src, dest_pids in shortest_paths.items():
-            src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src))
-            #src_pid_name = self.obtain_pid(src)
-            for dest_pid, weight in dest_pids.items():
-                dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid))
-                #dst_pid_name = self.obtain_pid(dest_pid)
-                if src_pid_name not in self.cost_map:
-                    self.cost_map[src_pid_name] = {}
-                self.cost_map[src_pid_name][dst_pid_name] = weight
-
-
-
-    ### RFC7285 functions
-    def get_costs_map_by_pid(self, pid):
-        #pid = "pid0:" + str(npid)
-        #print(pid)
-        #print(str(self.pids))
-        if pid in self.cost_map.keys():
-            #print(str(self.pids))
-            #print(str(self.cost_map))
-            return self.resp.crear_respuesta("filtro", "networkmap-default", self.vtag, str(self.cost_map[pid]))
-        else:
-            return "404: Not Found"
-
-    def get_properties(self, pid):
-        #return str(self.bf.session.q.nodeProperties().answer().frame())
-        return "Implementation in progress. Sorry, dude"
-    def get_endpoint_costs(self, pid):
-        return "Implementation in progress. Sorry, dude"
-
-    def get_maps(self):
-        return ('{"pids_map":' + self.get_pids() + ', "costs_map":' + self.get_costs_map() + '}')
-
-    def get_costs_map(self):
-        return self.resp.crear_respuesta("cost-map", "networkmap-default", self.vtag, str(self.cost_map))
-
-    def get_pids(self):
-        return self.resp.crear_respuesta("pid-map", "networkmap-default", self.vtag, str(self.pids))
-
-    def get_directory(self):
-        return self.resp.indice()
-
-    ### Extension functions
-
-    def shortest_path(self, a, b):
-        try:
-            return networkx.dijkstra_path(self.topology, a, b)
-        except networkx.exception.NetworkXNoPath as e:
-            return []
-        except Exception as e:
-            print(e)
-            return (-1)
-
-    def all_maps(self, topo, src, dst):
-        '''
-        Returns all the different paths between src and dst without any edge in common.
-        The result is a list of paths (each path is represented as a char list, e.g. ['a', 'c', 'd'])
-        Args:
-            topo: Topology map
-            src: node used as source
-            dst: node used as destination
-        '''
-        map_aux = networkx.Graph(topo)
-        all_paths = []
-
-        sh_path = networkx.dijkstra_path(map_aux, src, dst)
-        while sh_path != []:
-            cost = 0
-            nodo_s = sh_path[0]
-            for nodo_d in sh_path[1:]:
-                map_aux.remove_edge(nodo_s, nodo_d)
-                nodo_s = nodo_d
-                cost = cost + 1
-
-            all_paths.append({'path':sh_path, 'cost':cost})
-            try:
-                sh_path = networkx.dijkstra_path(map_aux, src, dst)
-            except networkx.exception.NetworkXNoPath as e:
-                sh_path = []
-        return all_paths
-
-    ### Manager function
-    def gestiona_info(self, fuente):
-        if fuente in self.d_modules.keys():
-            self.d_modules[fuente].manage_topology_updates()
-
-    def mailbox(self):
-        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        s.bind(('localhost',8081))
-        print("Waiting...")
-        self.h_thread.start()
-        while 1:
-            topo = s.recv(16384)
-            topo = topo.decode()
-            #print(topo)
-            # Received data is handled here.
-            # It receives a list of nodes, a list of edges (with weights), an indicator of the metric used, and the source.
-            # Nodes must already be parsed according to their AS.
-
-            #if asn != '':
-                #self.__compute_costmap(int(asn), topology)
-                #self.__compute_netmap(int(asn), pids)
-                #self.__vtag = str(int(datetime.timestamp(datetime.now())*1000000))
-                #self.evaluate_endpoints()
-                #self.__topology = topology
-                #self.topology_writer.write_same_ips(self.router_ids)
-                #self.__topology_writer.write_pid_file(self.__filter_net_map(1))
-                #self.__topology_writer.write_cost_map(self.__filter_cost_map(1))
-                #print("Nodes loaded:\t" ,str(self.__cost_map.keys()))
-            try:
-                datos = json.loads(str(topo).replace('\t', '').replace('\n', '').strip())
-                ejes = datos["data"]["costs-list"]
-                nodos = datos["data"]["nodes-list"]
-                for nodo in nodos:
-                    self.topology.add_node(nodo)
-                for eje in ejes:
-                    #print(eje)
-                    leje = eval(eje.replace("(","[").replace(")","]"))
-                    self.topology.add_edge(leje[0], leje[1], weight=leje[2])
-                self.compute_costmap()
-                #print("All good")
-                #self.__compute_netmap(int(asn), pids)
-            except:
-                print("Error while processing:\n", str(topo))
-            #print("netmap:\t" + str(datos["data"]["pids"]).replace("'",'"'))
-            #print("costmap:\t" + str(self.cost_map).replace("'",'"'))
-        self.__http.detener()
-
-
-
-class TopologyFileWriter:
-
-    def __init__(self, output_path):
-        self.output_path = output_path
-        self.pid_file = 'pid_file.json'
-        self.cost_map_file = 'cost_map.json'
-        self.same_node_ips = "router_ids.json"
-
-    def write_file(self, file_name, content_to_write):
-        """Writes file_name in output_file"""
-        full_path = os.path.join(self.output_path, file_name)
-        with open(full_path, 'w') as out_file:
-            json.dump(content_to_write, out_file, indent=4)
-
-    def write_pid_file(self, content):
-        self.write_file(self.pid_file, content)
-
-    def write_cost_map(self, content):
-        self.write_file(self.cost_map_file, content)
-
-    def write_same_ips(self, content):
-        self.write_file(self.same_node_ips, content)
-
-
-### Aux classes ###
-class TopologyUpdateThread(threading.Thread):
-
-    def __init__(self, topo_manager):
-        threading.Thread.__init__(self)
-        self.__tp_mng = topo_manager
-
-    def run (self):
-        t,a,p,c = self.__tp_mng.manage_bgp_speaker_updates()
-        return t,a,p,c
-
-### Aux classes ###
-class TopologyExpoThread(threading.Thread):
-
-    def __init__(self, a):
-        threading.Thread.__init__(self)
-        self.alto = a
-
-    def run (self):
-        self.alto.get_api_web_http().run()
-
-
-
-
-
-if __name__ == '__main__':
-    '''speaker_bgp = ManageBGPSpeaker()
-    exabgp_process = speaker_bgp.check_tcp_connection()
-
-    topology_creator = TopologyCreator(exabgp_process,0)
-    topology_creator.manage_ietf_speaker_updates()
-    '''
-    modules={}
-    #modules['bgp'] = TopologyBGP(('localhost',8081))
-    modules['ietf'] = TopologyIetf(('localhost',8081))
-
-    alto = TopologyCreator(modules, 0)
-
-    threads = list()
-    for modulo in modules.keys():
-        print(modulo)
-        x = threading.Thread(target=alto.gestiona_info, args=(modulo,))#, daemon=True)
-        threads.append(x)
-        x.start()
-
-
-    alto.mailbox()
-
-
-    #Test inclusion
diff --git a/src/alto/service/api/web/alto_http.py b/src/alto/service/api/web/alto_http.py
deleted file mode 100644
index 600319cf93a8f2329ce4c78d312137d6c639c5a6..0000000000000000000000000000000000000000
--- a/src/alto/service/api/web/alto_http.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python3
-
-#import threading
-import flask
-from werkzeug.serving import make_server
-
-#class AltoHttp(threading.Thread):
-class AltoHttp():
-
-    def __init__(self, a):
-        #threading.Thread.__init__(self)
-        self.app = flask.Flask("http")
-        self.app.config["DEBUG"] = True
-        self.alto = a
-        self.app.route('/', methods=['GET'])(self.home)
-        self.app.route('/costmap/filter/<string:pid>', methods=['GET'])(self.api_costs_by_pid)
-        self.app.route('/properties/<string:pid>', methods=['GET'])(self.api_properties)
-        self.app.route('/costmap/<string:pid>', methods=['GET'])(self.api_endpoint_costs)
-        self.app.route('/maps', methods=['GET'])(self.api_maps)
-        self.app.route('/costmap', methods=['GET'])(self.api_costs)
-        self.app.route('/networkmap', methods=['GET'])(self.api_pids)
-        self.app.route('/directory', methods=['GET'])(self.api_directory)
-        self.app.route('/all/<string:a>/<string:b>', methods=['GET'])(self.api_all)
-        self.app.route('/best/<string:a>/<string:b>', methods=['GET'])(self.api_shortest)
-        self.server = None
-
-    def run(self):
-        #self.app.run(host="127.0.0.1", port=5000)
-        self.server = make_server('0.0.0.0', 5000, self.app)
-        #self.server = make_server('192.168.165.193', 8080, self.app)
-        print("API running on " + "\x1b[1;34m" +"http://127.0.0.1:5000" + "\x1b[1;37;40m")
-        self.server.serve_forever()
-
-    def detener(self):
-        self.server.shutdown()
-
-    #@self.app.route('/', methods=['GET'])
-    def home(self):
-        return '''
-        <h1>ALTO PoC's API</h1>
-        <h2>Services exposed:</h2>
-        <p><ul>
-        <li>All disjoint paths between A & B: <b><tt> /all/<string:a>/<string:b> </b></tt></li>
-        <li>Shortest path between A & B: <b><tt> /best/<string:a>/<string:b> </b></tt></li>
-        <li>Costs map: /costmap </li>
-        <li>PIDs map: /networkmap </li>
-        <li>Filtered Cost map: /costmap/filter/<string:pid></li>
-        </ul></p>
-        '''
-
-    ###################################
-    ##                               ##
-    #  Services defined in RFC 7285   #
-    ##                               ##
-    ###################################
-
-    # Map-Filtering Service
-    #@self.app.route('/costmap/filter/<string:pid>', methods=['GET'])
-    def api_costs_by_pid(self, pid):
-        return flask.jsonify(self.alto.get_costs_map_by_pid(pid))
-
-    #Endpoint Property Service
-    #@self.app.route('/properties/<string:pid>', methods=['GET'])
-    def api_properties(self, pid):
-        return flask.jsonify(self.alto.get_properties(pid))
-
-    #Endpoint Cost Service
-    #@self.app.route('/costmap/<string:pid>', methods=['GET'])
-    def api_endpoint_costs(self, pid):
-        return flask.jsonify(self.alto.get_endpoint_costs(pid))
-
-    #Map Service
-    #@self.app.route('/maps', methods=['GET'])
-    def api_maps(self):
-        return flask.jsonify(self.alto.get_maps())
-
-    #Network Map service
-    #@self.app.route('/costmap', methods=['GET'])
-    def api_costs(self):
-        return flask.jsonify(self.alto.get_costs_map())
-
-    #@self.app.route('/networkmap', methods=['GET'])
-    def api_pids(self):
-        return flask.jsonify(self.alto.get_net_map())
-        #return flask.jsonify(self.alto.get_pids())
-
-    #@self.app.route('/directory', methods=['GET'])
-    def api_directory(self):
-        return flask.jsonify(self.alto.get_directory())
-
-    ###################################
-    ##                               ##
-    #           Extensions            #
-    ##                               ##
-    ###################################
-
-
-    #All possible paths between A and B without any common node
-    #@self.app.route('/all/<string:a>/<string:b>', methods=['GET'])
-    def api_all(self, a,b):
-        return flask.jsonify(self.alto.parseo_yang(str(self.alto.all_maps(self.alto.get_topology(), a, b)),"all-paths"))
-
-    #Best path between A and B
-    #@self.app.route('/best/<string:a>/<string:b>', methods=['GET'])
-    def api_shortest(self, a,b):
-        return flask.jsonify(str(self.alto.shortest_path(a, b)))
-
-
-if __name__ == '__main__':
-    #Creation of ALTO modules
-    '''modules={}
-    modules['bgp'] = TopologyBGP(('localhost',8888))
-    #modules['ietf'] = TopologyIetf(('localhost',8081))
-    alto = TopologyCreator(modules, 0)
-    hilos = alto.lanzadera()
-
-    hilo = HiloHTTP()
-    hilo.start()
-    hilos.append(hilo)
-    sleep(30)
-    alto.get_costs_map()
-    '''
-    #speaker_bgp = ManageBGPSpeaker()
-    #exabgp_process = speaker_bgp.check_tcp_connection()
-    #alto = TopologyCreator(exabgp_process,0)
-    #hilo = HiloHTTP()
-    #hilo.start()
-    #app.run(host='192.168.165.193', port=8080)
-    #app.run()
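Editor's note: since the routes above are plain Flask GET endpoints, the removed service could be exercised with any HTTP client. A minimal sketch, assuming a server started via AltoHttp.run() and reachable at localhost:5000 (the address printed by the deleted code); the PID and node values are hypothetical placeholders:

```python
# Minimal client for the removed ALTO HTTP API (illustrative only).
import requests

BASE = "http://127.0.0.1:5000"

# Map Service: both the network map and the cost map in one response.
maps = requests.get(f"{BASE}/maps").json()

# Map-Filtering Service: cost entries for a single (hypothetical) PID.
filtered = requests.get(f"{BASE}/costmap/filter/pid0:0a0a0a01").json()

# Extension: all disjoint paths between two nodes of the topology.
paths = requests.get(f"{BASE}/all/10.10.10.1/10.10.10.6").json()

print(maps, filtered, paths)
```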
diff --git a/src/alto/service/api/web/alto_https.py b/src/alto/service/api/web/alto_https.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/alto/service/api/web/certs/README.md b/src/alto/service/api/web/certs/README.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/alto/service/costmap b/src/alto/service/costmap
deleted file mode 100644
index dbe2165795f47383a38074126cf0765e484ba177..0000000000000000000000000000000000000000
--- a/src/alto/service/costmap
+++ /dev/null
@@ -1 +0,0 @@
-"{'meta':{'type':'alto-costmap+json','dependent-vtag':[{'resource-id':'networkmap-default','tag': '3d6155fc6e40ddad265441c844bdc9cf086eec2f6b2cabc7a5f72d038a0f189e'}],'cost-type': {'cost-mode' : 'numerical','cost-metric' : 'routingcost'}},'cost-map':{'pid0:0a0a0a05': {'pid0:0a0a0a05': 0, 'pid0:0a0a0a04': 1, 'pid0:0a0a0a02': 2, 'pid0:0a0a0a03': 2, 'pid0:0a0a0a01': 2, 'pid0:0a0a0a06': 3}, 'pid0:0a0a0a04': {'pid0:0a0a0a04': 0, 'pid0:0a0a0a02': 1, 'pid0:0a0a0a03': 1, 'pid0:0a0a0a01': 1, 'pid0:0a0a0a05': 1, 'pid0:0a0a0a06': 2}, 'pid0:0a0a0a03': {'pid0:0a0a0a03': 0, 'pid0:0a0a0a04': 1, 'pid0:0a0a0a06': 1, 'pid0:0a0a0a02': 2, 'pid0:0a0a0a01': 2, 'pid0:0a0a0a05': 2}, 'pid0:0a0a0a01': {'pid0:0a0a0a01': 0, 'pid0:0a0a0a04': 1, 'pid0:0a0a0a06': 1, 'pid0:0a0a0a02': 2, 'pid0:0a0a0a03': 2, 'pid0:0a0a0a05': 2}, 'pid0:0a0a0a06': {'pid0:0a0a0a06': 0, 'pid0:0a0a0a02': 1, 'pid0:0a0a0a03': 1, 'pid0:0a0a0a01': 1, 'pid0:0a0a0a04': 2, 'pid0:0a0a0a05': 3}, 'pid0:0a0a0a02': {'pid0:0a0a0a02': 0, 'pid0:0a0a0a04': 1, 'pid0:0a0a0a06': 1, 'pid0:0a0a0a03': 2, 'pid0:0a0a0a01': 2, 'pid0:0a0a0a05': 2}}}"
diff --git a/src/alto/service/desuso/api-alto/README.md b/src/alto/service/desuso/api-alto/README.md
deleted file mode 100644
index be631efa09bfdb823db8a8a91cc5ae8b0d4bb02c..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/api-alto/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-A more professionalized version of the ALTO server we will expose. The idea is to use a preliminary version at first and, once it works, migrate here.
-References: https://j2logo.com/flask/tutorial-como-crear-api-rest-python-con-flask/
diff --git a/src/alto/service/desuso/api-alto/app/db.py b/src/alto/service/desuso/api-alto/app/db.py
deleted file mode 100644
index 0c26edab9202f10648f3fdc751e0a5ca74348371..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/api-alto/app/db.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from flask_sqlalchemy import SQLAlchemy
-
-db = SQLAlchemy()
-
-
-class BaseModelMixin:
-
-    def save(self):
-        db.session.add(self)
-        db.session.commit()
-
-    def delete(self):
-        db.session.delete(self)
-        db.session.commit()
-
-    @classmethod
-    def get_all(cls):
-        return cls.query.all()
-
-    @classmethod
-    def get_by_id(cls, id):
-        return cls.query.get(id)
-
-    @classmethod
-    def simple_filter(cls, **kwargs):
-        return cls.query.filter_by(**kwargs).all()
diff --git a/src/alto/service/desuso/api-alto/app/ext.py b/src/alto/service/desuso/api-alto/app/ext.py
deleted file mode 100644
index e1aeab08f17e67675527447b6766ac5278a19638..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/api-alto/app/ext.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from flask_marshmallow import Marshmallow
-from flask_migrate import Migrate
-
-ma = Marshmallow()
-migrate = Migrate()
diff --git a/src/alto/service/desuso/api-alto/app/maps/api_v0_1/__init__.py b/src/alto/service/desuso/api-alto/app/maps/api_v0_1/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/alto/service/desuso/api-alto/app/maps/api_v0_1/resources.py b/src/alto/service/desuso/api-alto/app/maps/api_v0_1/resources.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/alto/service/desuso/api-alto/app/maps/api_v0_1/schemas.py b/src/alto/service/desuso/api-alto/app/maps/api_v0_1/schemas.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
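Editor's note: the BaseModelMixin in the deleted db.py was meant to be mixed into SQLAlchemy models of the api-alto app. A hypothetical model using it could look as follows; the CostMapEntry class and its columns are illustrative, not part of the removed code:

```python
# Hypothetical model built on the deleted BaseModelMixin (illustrative).
from app.db import db, BaseModelMixin  # path as in the removed api-alto app

class CostMapEntry(db.Model, BaseModelMixin):
    id = db.Column(db.Integer, primary_key=True)
    src_pid = db.Column(db.String(64), nullable=False)
    dst_pid = db.Column(db.String(64), nullable=False)
    cost = db.Column(db.Integer, default=1)

# entry = CostMapEntry(src_pid='pid0:a', dst_pid='pid0:b', cost=2)
# entry.save()                                 # INSERT + commit via the mixin
# CostMapEntry.simple_filter(src_pid='pid0:a') # SELECT ... WHERE src_pid=...
```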
diff --git a/src/alto/service/desuso/api-alto/app/maps/models.py b/src/alto/service/desuso/api-alto/app/maps/models.py
deleted file mode 100644
index af41704937effb6d1ac74fbedee300bbf0f8c496..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/api-alto/app/maps/models.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-
-
-'''
-The data models we will be working with are defined here.
-
-
-'''
diff --git a/src/alto/service/desuso/api_pybatfish.py b/src/alto/service/desuso/api_pybatfish.py
deleted file mode 100644
index 1768116aa299ce6c5590c90dd305d70f6454cc14..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/api_pybatfish.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python3
-
-import logging
-import pandas as pd
-from pybatfish.client.session import Session
-from pybatfish.datamodel import *
-from pybatfish.datamodel.answer import *
-from pybatfish.datamodel.flow import *
-
-
-# API generated from the information available at: https://pybatfish.readthedocs.io/en/latest/index.html
-
-
-class BatfishManager:
-
-    def __init__(self, shost='localhost', sname='default', netw='alto'):
-        self.session = Session(host=shost)
-        self.login = logging.getLogger("pybatfish").setLevel(logging.WARN)
-        self.snapshot = {'dir': '/root/cdn-alto/alto-ale/pruebas/', 'name' : str(sname) }
-        self.network = str(netw)
-
-        self.session.set_network(self.network)
-        self.session.init_snapshot(self.snapshot['dir'], self.snapshot['name'], overwrite=True)
-
-
-    def getSession(self):
-        return self.session
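Editor's note: the deleted BatfishManager wraps the standard pybatfish session workflow (set network, initialize snapshot, ask questions). A short usage sketch, assuming a running Batfish service and the snapshot directory hard-coded above:

```python
# Illustrative use of the deleted BatfishManager wrapper.
from api_pybatfish import BatfishManager  # module name as in the removed tree

bm = BatfishManager(shost='localhost', sname='default', netw='alto')
session = bm.getSession()

# Standard pybatfish question flow: ask, answer, convert to a DataFrame.
nodes = session.q.nodeProperties().answer().frame()
print(nodes.head())
```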
diff --git a/src/alto/service/desuso/exponsure.py.bk b/src/alto/service/desuso/exponsure.py.bk
deleted file mode 100644
index 7bbb0775e5982053650c28bb43e9ae3183ebff34..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/exponsure.py.bk
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python3
-
-
-import os
-import sys
-import json
-import threading
-import flask
-from time import sleep
-sys.path.append('cdn-alto/')
-from bgp.manage_bgp_speaker import ManageBGPSpeaker
-sys.path.append('alto-ale/')
-from emergencia.topology_maps_generator import TopologyCreator
-from emergencia.topology_maps_generator import TopologyFileWriter
-from modulos.topology_bgp import TopologyBGP
-from modulos.topology_ietf import TopologyIetf
-
-
-
-class HiloHTTP (threading.Thread):
-
-    def __init__(self):
-        threading.Thread.__init__(self)
-        #self.tc = topology_creator
-
-    def run (self):
-        alto.manage_bgp_speaker_updates(0)
-        #alto.mailbox(8888)
-
-
-# Global code
-app = flask.Flask(__name__)
-app.config["DEBUG"] = True
-@app.route('/', methods=['GET'])
-def home():
-    return '''
-    <h1>TEST API FOR ACCESSING THE ALTO SERVICE</h1>
-    <h2>Available services:</h2>
-    <p><ul>
-    <li>All disjoint paths between A and B: <b><tt> /all/<string:a>/<string:b> </b></tt></li>
-    <li>Shortest path between A and B: <b><tt> /best/<string:a>/<string:b> </b></tt></li>
-    <li>Costs map: /costs </li>
-    <li>PIDs map: /pids </li>
-    </ul></p>
-
-
-    '''
-
-###################################
-##                               ##
-#  Services defined in RFC 7285   #
-##                               ##
-###################################
-
-# Map-Filtering Service
-@app.route('/costmap/filter/<string:pid>', methods=['GET'])
-def api_costs_by_pid(pid):
-    return flask.jsonify(alto.get_costs_map_by_pid(pid))
-
-#Endpoint Property Service
-@app.route('/properties/<string:pid>', methods=['GET'])
-def api_properties(pid):
-    return flask.jsonify(alto.get_properties(pid))
-
-#Endpoint Cost Service
-@app.route('/costmap/<string:pid>', methods=['GET'])
-def api_endpoint_costs(pid):
-    return flask.jsonify(alto.get_endpoint_costs(pid))
-
-#Map Service
-@app.route('/maps', methods=['GET'])
-def api_maps():
-    return flask.jsonify(alto.get_maps())
-
-#Network Map service
-@app.route('/costmap', methods=['GET'])
-def api_costs():
-    return flask.jsonify(alto.get_costs_map())
-
-@app.route('/networkmap', methods=['GET'])
-def api_pids():
-    return flask.jsonify(alto.get_pids())
-
-@app.route('/directory', methods=['GET'])
-def api_directory():
-    return flask.jsonify(alto.get_directory())
-
-
-###################################
-##                               ##
-#           Extensions            #
-##                               ##
-###################################
-
-
-#All possible paths between A and B without any common node
-@app.route('/all/<string:a>/<string:b>', methods=['GET'])
-def api_all(a,b):
-    return flask.jsonify(alto.parseo_yang(str(alto.all_maps(alto.topology, a, b)),"all-paths"))
-
-#Best path between A and B
-@app.route('/best/<string:a>/<string:b>', methods=['GET'])
-def api_shortest(a,b):
-    return flask.jsonify(str(alto.shortest_path(a, b)))
-
-if __name__ == '__main__':
-    #Creation of ALTO modules
-    '''modules={}
-    modules['bgp'] = TopologyBGP(('localhost',8888))
-    #modules['ietf'] = TopologyIetf(('localhost',8081))
-    alto = TopologyCreator(modules, 0)
-    hilos = alto.lanzadera()
-
-    hilo = HiloHTTP()
-    hilo.start()
-    hilos.append(hilo)
-    sleep(30)
-    alto.get_costs_map()
-    '''
-
-    speaker_bgp = ManageBGPSpeaker()
-    exabgp_process = speaker_bgp.check_tcp_connection()
-    alto = TopologyCreator(exabgp_process,0)
-
-    hilo = HiloHTTP()
-    hilo.start()
-    #app.run(host='192.168.165.193', port=8080)
-    app.run()
diff --git a/src/alto/service/desuso/launcher_batfish b/src/alto/service/desuso/launcher_batfish
deleted file mode 100644
index 1cc637f5b224c95dbb8c559f49d9f7faab1eae08..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/launcher_batfish
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# I have to review this because the trap part is not working at all --> check the trap usage; there is no man page for trap, and the problem may lie there
-
-matemos(){
-    echo "Killing everything"
-    kill -9 $AMATAR
-    docker stop batfish
-    docker rm batfish
-    echo "All dead"
-    SEGUIR=0
-}
-
-SEGUIR=1
-docker pull batfish/allinone
-docker run --name batfish -v batfish-data:/data -p 8888:8888 -p 9997:9997 -p 9996:9996 batfish/allinone &
-
-AMATAR=$!
- -trap 'matemos' 2 - -while [[ $SEGUIR == 1 ]]; -do - sleep 1 -done diff --git a/src/alto/service/desuso/topology_general.py b/src/alto/service/desuso/topology_general.py deleted file mode 100644 index 65dede0c1bb7b2ed917735696613a6f382012b72..0000000000000000000000000000000000000000 --- a/src/alto/service/desuso/topology_general.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import json -import re -import networkx -import socket -import struct -import hashlib - -from time import sleep -from datetime import datetime -sys.path.append('cdn-alto/') -from bgp.manage_bgp_speaker import ManageBGPSpeaker -sys.path.append('alto-ale/') -from kafka_ale.kafka_api import AltoProducer - -DEFAULT_ASN = 0 -RR_BGP_0 = "50.50.50.1" -#RR_BGP = BGP_INFO['bgp']['ip'] - - -class TopologyCreator: - - def __init__(self, exabgp_process, mode): - self.exabgp_process = exabgp_process - self.props = {} - self.pids = {} - self.topology = networkx.Graph() - self.cost_map = {} - self.router_ids = [] - # set path where to write result json files - self.topology_writer = TopologyFileWriter('/root/') - if mode: - self.kafka_p = AltoProducer("localhost", "9092") - #self.kafka_p = AltoProducer("localhost", "9093") - self.ts = {} - - @staticmethod - def discard_message_from_protocol_id(message, discard_protocols): - """Discard message if protocol is inside discard_protocols list""" - return message["protocol-id"] in discard_protocols - - @staticmethod - def get_hex_id(ip): - """Get hexadecimal value for certain IP - :param: ip string""" - return ''.join(['%02x' % int(w) for w in ip.split('.')]) - - @staticmethod - def check_is_hex(hex_value): - try: - int(hex_value, 16) - return True - except ValueError: - return False - - def obtain_pid(self, router): - """Returns the hashed PID of the router passed as argument. - If the PID was already mapped, it uses a dictionary to access to it. - """ - tsn = int(datetime.timestamp(datetime.now())*1000000) - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router + str(tsn)).encode()) - return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn)) - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. - with AS number + domain_id + area_id + hexadecimal router_id - """ - routers_id = [] - desc = lsa[descriptors] - for item in desc: - if "router-id" in item: - routers_id.append(item["router-id"]) - autonomous_systems = [item.get("autonomous-system") for item in desc] - domain_ids = [item.get("domain-id", 0) for item in desc] - for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id) - #pid_name = self.obtain_pid(router_id) - origin = (autonomous_system, domain_id, area_id, router_id) - if pid_name not in self.props: - self.props[pid_name] = [] - self.props[pid_name].append(origin) - - @staticmethod - def split_router_ids(router_id: str): - """some router ids come without IP format. ie.e without dots in it - convert these router_ids to IPs""" - router_id = str(router_id) - if '.' 
in router_id: - return router_id - router_groups = re.findall('...', router_id) - no_zero_groups = [] - for group in router_groups: - if group.startswith('00'): - no_zero_groups.append(group[2:]) - elif group.startswith('0'): - no_zero_groups.append(group[1:]) - else: - no_zero_groups.append(group) - return '.'.join(no_zero_groups) - - def _get_router_id_from_node_descript_list(self, node_descriptors, key: str): - result = [] - for descriptor in node_descriptors: - for key_d, value in descriptor.items(): - if key_d == key: - #print(value, key_d) - if self.check_if_router_id_is_hex(value): - result.append(self.split_router_ids(value)) - elif "." in value: - result.append(value) - else: - result.append(self.reverse_ip(self.hex_to_ip(value))) - return result - - @staticmethod - def check_if_router_id_is_hex(router_id): - return router_id.isnumeric() - - @staticmethod - def hex_to_ip(hex_ip): - hex_ip = hex_ip.strip("0") - addr_long = int(hex_ip, 16) & 0xFFFFFFFF - struct.pack("<L", addr_long) - return socket.inet_ntoa(struct.pack("<L", addr_long)) - - @staticmethod - def reverse_ip(reversed_ip): - l = reversed_ip.split(".") - return '.'.join(l[::-1]) - - def parseo_yang(self, mensaje, tipo): - return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}' - - def load_topology(self, lsa, igp_metric): - if lsa.get('ls-nlri-type') == 'bgpls-link': - # Link information - src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id') - dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id') - for i, j in zip(src, dst): - self.topology.add_edge(i, j, weight=igp_metric) - if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4': - # ToDo verify if prefix info is needed and not already provided by node-descriptors - # Node information. 
Groups origin with its prefixes - origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id") - prefix = self.split_router_ids(lsa['ip-reach-prefix']) - for item in origin: - if item not in self.topology.nodes(): - self.topology.add_node(item) - if 'prefixes' not in self.topology.nodes[item]: - self.topology.nodes[item]['prefixes'] = [] - self.topology.nodes[item]['prefixes'].append(prefix) - if lsa.get('ls-nlri-type') == "bgpls-node": - # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4 - # add node to topology if not present - node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id') - self.router_ids.append(node_descriptors) - for node_descriptor in node_descriptors: - if node_descriptor not in self.topology.nodes(): - self.topology.add_node(node_descriptor) - - def load_pid_prop(self, lsa, ls_area_id): - if 'node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id) - if 'local-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id) - if 'remote-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id) - - def load_pids(self, ipv4db): - # self.pids stores the result of networkmap - for rr_bgp in [RR_BGP_0]: - for prefix, data in ipv4db[rr_bgp]['ipv4'].items(): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop'])) - #pid_name = self.obtain_pid(data['next-hop']) - if pid_name not in self.pids: - self.pids[pid_name] = [] - if prefix not in self.pids[pid_name]: - self.pids[pid_name].append(prefix) - - def compute_costmap(self): - # shortest_paths is a dict by source and target that contains the shortest path length for - # that source and destination - shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology)) - for src, dest_pids in shortest_paths.items(): - src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src)) - #src_pid_name = self.obtain_pid(src) - for dest_pid, weight in dest_pids.items(): - dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid)) - #dst_pid_name = self.obtain_pid(dest_pid) - if src_pid_name not in self.cost_map: - self.cost_map[src_pid_name] = {} - self.cost_map[src_pid_name][dst_pid_name] = weight - - def get_costs_map(self): - return str(self.cost_map) - - def get_pids(self): - return str(self.pids) - - - def shortest_path(self, a, b): - try: - return networkx.dijkstra_path(self.topology, a, b) - except networkx.exception.NetworkXNoPath as e: - return [] - except Exception as e: - print(e) - return (-1) - - def all_maps(self, topo, src, dst): - ''' - Returns all the diferent paths between src and dest without any edge in common. - The result is a list of paths (each path is represented as a char list, e.g. 
['a', 'c', 'd']) - Args: - topo: Topology map - src: node used as source - dst: node used as destination - ''' - map_aux = networkx.Graph(topo) - all_paths = [] - - sh_path = networkx.dijkstra_path(map_aux, src, dst) - while sh_path != []: - cost = 0 - nodo_s = sh_path[0] - for nodo_d in sh_path[1:]: - map_aux.remove_edge(nodo_s, nodo_d) - nodo_s = nodo_d - cost = cost + 1 - - all_paths.append({'path':sh_path, 'cost':cost}) - try: - sh_path = networkx.dijkstra_path(map_aux, src, dst) - except networkx.exception.NetworkXNoPath as e: - sh_path = [] - return all_paths - - def manage_bgp_speaker_updates(self, mode): - """ - Reads stdout of process exabgp. It reads line by line - Decoded update messages from exabgp are used to build the netwokmap and costmap - :return: - """ - pids_to_load = {RR_BGP_0: {'ipv4': {}}} - while True: - line = self.exabgp_process.stdout.readline().strip() - if b'decoded UPDATE' in line and b'json' in line: - #print(line) - decode_line = json.loads(line.split(b'json')[1]) - neighbor_ip_address = decode_line['neighbor']['address']['peer'] - update_msg = decode_line['neighbor']['message']['update'] - if 'announce' in update_msg: - is_bgp_ls = update_msg['announce'].get('bgp-ls bgp-ls') - is_bgp = update_msg['announce'].get('ipv4 unicast') - if 'attribute' in update_msg: - ls_area_id = update_msg['attribute'].get('bgp-ls', {}).get('area-id', 0) - igp_metric = update_msg['attribute'].get('bgp-ls', {}).get("igp-metric", 1) - if is_bgp_ls: - for next_hop_address, nlri in is_bgp_ls.items(): - for prefix in nlri: - if self.discard_message_from_protocol_id(prefix, [4, 5]): - continue - self.load_topology(prefix, igp_metric) - self.load_pid_prop(prefix, ls_area_id) - elif is_bgp: - for next_hop, prefix in is_bgp.items(): - for nlri in prefix: - pids_to_load[neighbor_ip_address]['ipv4'][nlri['nlri']] = {'next-hop': next_hop} - self.load_pids(pids_to_load) - - elif 'withdraw' in update_msg and 'bgp-ls bgp-ls' in update_msg['withdraw']: - for route in update_msg['withdraw']['bgp-ls bgp-ls']: - u=0;v=0 - for field, values in route.items(): - if field == "local-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - u=j - elif field == "remote-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - v=j - if u != 0 and v != 0: - try: - self.topology.remove_edge(self.split_router_ids(u), self.split_router_ids(v)) - except: - print("Eje ya removido.") - - self.compute_costmap() - self.topology_writer.write_same_ips(self.router_ids) - self.topology_writer.write_pid_file(self.pids) - self.topology_writer.write_cost_map(self.cost_map) - - if bool(self.cost_map) : - if mode: - self.kafka_p.envio_alto('alto-costes', self.cost_map, 0) - - -class TopologyFileWriter: - - def __init__(self, output_path): - self.output_path = output_path - self.pid_file = 'pid_file.json' - self.cost_map_file = 'cost_map.json' - self.same_node_ips = "router_ids.json" - - def write_file(self, file_name, content_to_write): - """Writes file_name in output_file""" - full_path = os.path.join(self.output_path, file_name) - with open(full_path, 'w') as out_file: - json.dump(content_to_write, out_file, indent=4) - - def write_pid_file(self, content): - self.write_file(self.pid_file, content) - - def write_cost_map(self, content): - self.write_file(self.cost_map_file, content) - - def write_same_ips(self, content): - self.write_file(self.same_node_ips, content) - - -if __name__ == '__main__': - speaker_bgp = ManageBGPSpeaker() - exabgp_process = 
speaker_bgp.check_tcp_connection() - - topology_creator = TopologyCreator(exabgp_process,1) - topology_creator.manage_bgp_speaker_updates(1) diff --git a/src/alto/service/desuso/topology_maps_generator.py.bk b/src/alto/service/desuso/topology_maps_generator.py.bk deleted file mode 100644 index f1a0b9ba4d899e9b2f7e6a33ee47aeb73167b4b8..0000000000000000000000000000000000000000 --- a/src/alto/service/desuso/topology_maps_generator.py.bk +++ /dev/null @@ -1,466 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import json -import re -import networkx -import socket -import struct -import hashlib - -from time import sleep -from datetime import datetime -sys.path.append('cdn-alto/') -from bgp.manage_bgp_speaker import ManageBGPSpeaker -sys.path.append('alto-ale/') -from kafka_ale.kafka_api import AltoProducer -#from api_pybatfish import BatfishManager -from yang_alto import RespuestasAlto -from ipaddress import ip_address, IPv4Address - -DEFAULT_ASN = 0 -RR_BGP_0 = "50.50.50.1" -#RR_BGP = BGP_INFO['bgp']['ip'] - - -class TopologyCreator: - - def __init__(self, exabgp_process, mode): - self.exabgp_process = exabgp_process - self.props = {} - self.pids = {} - self.topology = networkx.Graph() - self.cost_map = {} - self.router_ids = [] - # set path where to write result json files - self.topology_writer = TopologyFileWriter('/root/') - if mode: - self.kafka_p = AltoProducer("localhost", "9092") - #self.kafka_p = AltoProducer("localhost", "9093") - self.ts = {} - #self.bfm = BatfishManager() - self.vtag = 0 - self.resp = RespuestasAlto() - - ### Static Methods - - @staticmethod - def discard_message_from_protocol_id(message, discard_protocols): - """Discard message if protocol is inside discard_protocols list""" - return message["protocol-id"] in discard_protocols - - @staticmethod - def get_hex_id(ip): - """Get hexadecimal value for certain IP - :param: ip string""" - return ''.join(['%02x' % int(w) for w in ip.split('.')]) - - @staticmethod - def check_is_hex(hex_value): - try: - int(hex_value, 16) - return True - except ValueError: - return False - - @staticmethod - def split_router_ids(router_id: str): - """some router ids come without IP format. ie.e without dots in it - convert these router_ids to IPs""" - router_id = str(router_id) - if '.' in router_id: - return router_id - router_groups = re.findall('...', router_id) - no_zero_groups = [] - for group in router_groups: - if group.startswith('00'): - no_zero_groups.append(group[2:]) - elif group.startswith('0'): - no_zero_groups.append(group[1:]) - else: - no_zero_groups.append(group) - return '.'.join(no_zero_groups) - - @staticmethod - def check_if_router_id_is_hex(router_id): - return router_id.isnumeric() - - @staticmethod - def hex_to_ip(hex_ip): - hex_ip = hex_ip.strip("0") - addr_long = int(hex_ip, 16) & 0xFFFFFFFF - struct.pack("<L", addr_long) - return socket.inet_ntoa(struct.pack("<L", addr_long)) - - @staticmethod - def reverse_ip(reversed_ip): - l = reversed_ip.split(".") - return '.'.join(l[::-1]) - - - - ### Auxiliar methods - - def ip_type(self, prefix): - ip=prefix.split("/")[0] - return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6" - - def obtain_pid(self, router): - """Returns the hashed PID of the router passed as argument. - If the PID was already mapped, it uses a dictionary to access to it. 
- """ - tsn = int(datetime.timestamp(datetime.now())*1000000) - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router + str(tsn)).encode()) - return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn)) - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. - with AS number + domain_id + area_id + hexadecimal router_id - """ - routers_id = [] - desc = lsa[descriptors] - for item in desc: - if "router-id" in item: - routers_id.append(item["router-id"]) - autonomous_systems = [item.get("autonomous-system") for item in desc] - domain_ids = [item.get("domain-id", 0) for item in desc] - for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id) - #pid_name = self.obtain_pid(router_id) - origin = (autonomous_system, domain_id, area_id, router_id) - if pid_name not in self.props: - self.props[pid_name] = [] - self.props[pid_name].append(origin) - - def _get_router_id_from_node_descript_list(self, node_descriptors, key: str): - result = [] - for descriptor in node_descriptors: - for key_d, value in descriptor.items(): - if key_d == key: - #print(value, key_d) - if self.check_if_router_id_is_hex(value): - result.append(self.split_router_ids(value)) - elif "." in value: - result.append(value) - else: - result.append(self.reverse_ip(self.hex_to_ip(value))) - return result - - def parseo_yang(self, mensaje, tipo): - return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}' - - - - ### Topology generation and information recopilation functions - - def load_topology(self, lsa, igp_metric): - if lsa.get('ls-nlri-type') == 'bgpls-link': - # Link information - src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id') - dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id') - for i, j in zip(src, dst): - self.topology.add_edge(i, j, weight=igp_metric) - if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4': - # ToDo verify if prefix info is needed and not already provided by node-descriptors - # Node information. 
Groups origin with its prefixes - origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id") - prefix = self.split_router_ids(lsa['ip-reach-prefix']) - for item in origin: - if item not in self.topology.nodes(): - self.topology.add_node(item) - if 'prefixes' not in self.topology.nodes[item]: - self.topology.nodes[item]['prefixes'] = [] - self.topology.nodes[item]['prefixes'].append(prefix) - if lsa.get('ls-nlri-type') == "bgpls-node": - # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4 - # add node to topology if not present - node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id') - self.router_ids.append(node_descriptors) - for node_descriptor in node_descriptors: - if node_descriptor not in self.topology.nodes(): - self.topology.add_node(node_descriptor) - - def load_pid_prop(self, lsa, ls_area_id): - if 'node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id) - if 'local-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id) - if 'remote-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id) - - def load_pids(self, ipv4db): - # self.pids stores the result of networkmap - for rr_bgp in [RR_BGP_0]: - for prefix, data in ipv4db[rr_bgp]['ipv4'].items(): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop'])) - #pid_name = self.obtain_pid(data['next-hop']) - tipo=self.ip_type(prefix) - if pid_name not in self.pids: - self.pids[pid_name] = {} - if tipo not in self.pids[pid_name]: - self.pids[pid_name][tipo]=[] - if prefix not in self.pids[pid_name][tipo]: - self.pids[pid_name][tipo].append(prefix) - - def compute_costmap(self): - # shortest_paths is a dict by source and target that contains the shortest path length for - # that source and destination - shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology)) - for src, dest_pids in shortest_paths.items(): - src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src)) - #src_pid_name = self.obtain_pid(src) - for dest_pid, weight in dest_pids.items(): - dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid)) - #dst_pid_name = self.obtain_pid(dest_pid) - if src_pid_name not in self.cost_map: - self.cost_map[src_pid_name] = {} - self.cost_map[src_pid_name][dst_pid_name] = weight - - - - ### RFC7285 functions - def get_costs_map_by_pid(self, pid): - #pid = "pid0:" + str(npid) - #print(pid) - #print(str(self.pids)) - if pid in self.cost_map.keys(): - #print(str(self.pids)) - #print(str(self.cost_map)) - return self.resp.crear_respuesta("filtro", "networkmap-default", self.vtag, str(self.cost_map[pid])) - else: - return "404: Not Found" - - def get_properties(self, pid): - #return str(self.bf.session.q.nodeProperties().answer().frame()) - return "Implementation in proccess. Sorry dude" - - def get_endpoint_costs(self, pid): - return "Implementation in proccess. 
Sorry dude" - - def get_maps(self): - return ('{"pids_map":' + self.get_pids() + ', "costs_map":' + self.get_costs_map() + '}') - - def get_costs_map(self): - return self.resp.crear_respuesta("cost-map", "networkmap-default", self.vtag, str(self.cost_map)) - - def get_pids(self): - return self.resp.crear_respuesta("pid-map", "networkmap-default", self.vtag, str(self.pids)) - - def get_directory(self): - return self.resp.indice() - - ### Ampliation functions - - def shortest_path(self, a, b): - try: - return networkx.dijkstra_path(self.topology, a, b) - except networkx.exception.NetworkXNoPath as e: - return [] - except Exception as e: - print(e) - return (-1) - - def all_maps(self, topo, src, dst): - ''' - Returns all the diferent paths between src and dest without any edge in common. - The result is a list of paths (each path is represented as a char list, e.g. ['a', 'c', 'd']) - Args: - topo: Topology map - src: node used as source - dst: node used as destination - ''' - map_aux = networkx.Graph(topo) - all_paths = [] - - sh_path = networkx.dijkstra_path(map_aux, src, dst) - while sh_path != []: - cost = 0 - nodo_s = sh_path[0] - for nodo_d in sh_path[1:]: - map_aux.remove_edge(nodo_s, nodo_d) - nodo_s = nodo_d - cost = cost + 1 - - all_paths.append({'path':sh_path, 'cost':cost}) - try: - sh_path = networkx.dijkstra_path(map_aux, src, dst) - except networkx.exception.NetworkXNoPath as e: - sh_path = [] - return all_paths - - - - ### Manager function - - def manage_bgp_speaker_updates(self, mode): - """ - Reads stdout of process exabgp. It reads line by line - Decoded update messages from exabgp are used to build the netwokmap and costmap - :return: - """ - pids_to_load = {RR_BGP_0: {'ipv4': {}}} - while True: - line = self.exabgp_process.stdout.readline().strip() - if b'decoded UPDATE' in line and b'json' in line: - #print(line) - self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64] - decode_line = json.loads(line.split(b'json')[1]) - neighbor_ip_address = decode_line['neighbor']['address']['peer'] - update_msg = decode_line['neighbor']['message']['update'] - if 'announce' in update_msg: - is_bgp_ls = update_msg['announce'].get('bgp-ls bgp-ls') - is_bgp = update_msg['announce'].get('ipv4 unicast') - if 'attribute' in update_msg: - ls_area_id = update_msg['attribute'].get('bgp-ls', {}).get('area-id', 0) - igp_metric = update_msg['attribute'].get('bgp-ls', {}).get("igp-metric", 1) - if is_bgp_ls: - for next_hop_address, nlri in is_bgp_ls.items(): - for prefix in nlri: - if self.discard_message_from_protocol_id(prefix, [4, 5]): - continue - self.load_topology(prefix, igp_metric) - self.load_pid_prop(prefix, ls_area_id) - elif is_bgp: - for next_hop, prefix in is_bgp.items(): - for nlri in prefix: - pids_to_load[neighbor_ip_address]['ipv4'][nlri['nlri']] = {'next-hop': next_hop} - self.load_pids(pids_to_load) - - elif 'withdraw' in update_msg and 'bgp-ls bgp-ls' in update_msg['withdraw']: - for route in update_msg['withdraw']['bgp-ls bgp-ls']: - u=0;v=0 - for field, values in route.items(): - if field == "local-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - u=j - elif field == "remote-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - v=j - if u != 0 and v != 0: - try: - self.topology.remove_edge(self.split_router_ids(u), self.split_router_ids(v)) - except: - print("Eje ya removido.") - - self.compute_costmap() - 
self.topology_writer.write_same_ips(self.router_ids)
-            self.topology_writer.write_pid_file(self.pids)
-            self.topology_writer.write_cost_map(self.cost_map)
-
-            if bool(self.cost_map):
-                if mode:
-                    self.kafka_p.envio_alto('alto-costes', self.cost_map, 0)
-
-    def manage_ietf_speaker_updates(self):
-        '''
-        Receives topology information from the PCE over the Southbound Interface and creates/updates the graphs.
-        Performs an iterative analysis, reviewing each network: if two networks are the same but arrive via different protocols, they must be merged.
-        Three attributes on each network: dic[ips], dic[interfaces] and graph[links]
-        '''
-        # Dictionary node-id:name
-        nodos = {}
-        # Dictionary node-id:[(interface, ip)]
-        tps = {}
-        # List of links
-        links = []
-        full_path = os.path.join("/root/", "ietf_prueba.json")
-        with open(full_path, 'r') as archivo:
-            self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64]
-            #while True:
-            deluro = archivo.read()
-            d_json = json.loads(str(deluro))
-            #print("Type = " + str(type(d_json)) + "\nMessage = " + str(d_json))
-            ietf_networks = d_json["ietf-network:networks"]
-            if ietf_networks == '':
-                return
-            # Build a dictionary with all the networks present and walk it looking for the valid ones
-            for net in ietf_networks["network"]:
-                if "node" in net.keys() and "ietf-network-topology:link" in net.keys():
-                    for nodo in net["node"]:
-                        # Match each node ID to its name and prefix(es).
-                        nodos[nodo["node-id"]] = nodo["ietf-l3-unicast-topology:l3-node-attributes"]["name"]
-                        tps[nodo["node-id"]] = []
-                        if "ietf-network-topology:termination-point" in nodo.keys():
-                            for tp in nodo["ietf-network-topology:termination-point"]:
-                                tps[nodo["node-id"]].append(str(nodos[nodo["node-id"]]) + ' ' + str(tp["tp-id"]))
-                        pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(nodo["node-id"]))
-                        if pid_name not in self.pids:
-                            self.pids[pid_name] = {}
-                        if 'ipv4' not in self.pids[pid_name]:
-                            self.pids[pid_name]['ipv4'] = []
-                        if nodo['node-id'] not in self.pids[pid_name]['ipv4']:
-                            self.pids[pid_name]['ipv4'].append(nodo['node-id'])
-                        self.topology.add_node(nodo['node-id'])
-
-                    # Still to do: list the links and store them.
-                    for link in net["ietf-network-topology:link"]:
-                        a, b = link["link-id"].split(" - ")
-                        if a == '' or b == '':
-                            break
-                        a1 = a.split(' ')[0]
-                        b1 = b.split(' ')[0]
-                        for k in nodos.keys():
-                            if nodos[k] == a1:
-                                a = k
-                            elif nodos[k] == b1:
-                                b = k
-                        links.append(((a, b), link["ietf-l3-unicast-topology:l3-link-attributes"]["metric1"]))
-
-            # Once everything works, store this in a graph instead of dictionaries. -> The nodes can already be added above.
-            # Everything runs correctly right now; still pending: translate a,b to PIDs instead of node-ids.
-            for link in links:
-                self.topology.add_edge(link[0][0], link[0][1], weight=int(link[1]))
-
-            # We still need to review which dictionaries remain necessary.
-            # Since BGP represents it as node-id - node-id, it may be important to unify the representation shown. (done)
-            # What do we do with the interfaces? Do we show them on the edges, or is that unnecessary? Do we keep a list of links showing how they connect?
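As a reading aid for the deleted manage_ietf_speaker_updates above: the parsing step it performs can be sketched standalone as below. This is a minimal, illustrative sketch only; it assumes an ietf-network:networks document shaped like the ietf_prueba.json sample elsewhere in this diff, and the /root/ path and metric1 handling mirror the deleted code rather than any fixed API.

import json
import networkx

# Minimal sketch: build a weighted graph from an ietf-network:networks document.
topology = networkx.Graph()
names = {}  # node-id -> human-readable name
with open('/root/ietf_prueba.json') as f:
    doc = json.load(f)
for net in doc['ietf-network:networks']['network']:
    if 'node' not in net or 'ietf-network-topology:link' not in net:
        continue
    for node in net['node']:
        names[node['node-id']] = node['ietf-l3-unicast-topology:l3-node-attributes']['name']
        topology.add_node(node['node-id'])
    ids = {name: node_id for node_id, name in names.items()}  # reverse lookup, as in the code above
    for link in net['ietf-network-topology:link']:
        attrs = link.get('ietf-l3-unicast-topology:l3-link-attributes')
        if attrs is None:
            continue  # some links in the sample carry no metric
        a, b = link['link-id'].split(' - ', 1)
        u, v = ids.get(a.split(' ')[0]), ids.get(b.split(' ')[0])
        if u is not None and v is not None:
            topology.add_edge(u, v, weight=int(attrs['metric1']))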
- print("Done") - self.compute_costmap() - self.topology_writer.write_same_ips(self.router_ids) - self.topology_writer.write_pid_file(self.pids) - self.topology_writer.write_cost_map(self.cost_map) - print(str(self.get_maps())) - - - -class TopologyFileWriter: - - def __init__(self, output_path): - self.output_path = output_path - self.pid_file = 'pid_file.json' - self.cost_map_file = 'cost_map.json' - self.same_node_ips = "router_ids.json" - - def write_file(self, file_name, content_to_write): - """Writes file_name in output_file""" - full_path = os.path.join(self.output_path, file_name) - with open(full_path, 'w') as out_file: - json.dump(content_to_write, out_file, indent=4) - - def write_pid_file(self, content): - self.write_file(self.pid_file, content) - - def write_cost_map(self, content): - self.write_file(self.cost_map_file, content) - - def write_same_ips(self, content): - self.write_file(self.same_node_ips, content) - - - -if __name__ == '__main__': - speaker_bgp = ManageBGPSpeaker() - exabgp_process = speaker_bgp.check_tcp_connection() - - topology_creator = TopologyCreator(exabgp_process,0) - topology_creator.manage_ietf_speaker_updates() diff --git a/src/alto/service/desuso/topology_maps_generator_isis.py b/src/alto/service/desuso/topology_maps_generator_isis.py deleted file mode 100644 index f79d938db6ca918b0f144bc2f353b5cf4e01c234..0000000000000000000000000000000000000000 --- a/src/alto/service/desuso/topology_maps_generator_isis.py +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import json -import re -import networkx -import json -import hashlib - -from datetime import datetime -sys.path.append('cdn-alto/') -from bgp.manage_bgp_speaker import ManageBGPSpeaker -sys.path.append('alto-ale/') -from kafka_ale.kafka_api import AltoProducer - -DEFAULT_ASN = 0 -RR_BGP_0 = "50.50.50.1" -#RR_BGP = BGP_INFO['bgp']['ip'] - - -class TopologyCreator: - - def __init__(self, exabgp_process): - self.exabgp_process = exabgp_process - self.props = {} - self.pids = {} - self.topology = networkx.MultiGraph() - self.cost_map = {} - # set path where to write result json files - self.topology_writer = TopologyFileWriter('/root/') - self.kafka_p = AltoProducer("localhost", "9092") - #self.kafka_p = AltoProducer("localhost", "9093") - self.ts = {} - - @staticmethod - def get_hex_id(ip): - """Get hexadecimal value for certain IP - :param: ip string""" - return ''.join(['%02x' % int(w) for w in ip.split('.')]) - - @staticmethod - def check_is_hex(hex_value): - try: - int(hex_value, 16) - return True - except ValueError: - return False - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. 
- with AS number + domain_id + area_id + hexadecimal router_id - """ - routers_id = [] - desc = lsa[descriptors] - for item in desc: - if "router-id" in item: - routers_id.append(item["router-id"]) - autonomous_systems = [item.get("autonomous-system") for item in desc] - domain_ids = [item.get("domain-id", 0) for item in desc] - for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids): - #pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id) - tsn = int(datetime.timestamp(datetime.now())*1000000) - #pid_name = 'pid%d:%s:%d' % (DEFAULT_ASN, str(hash(router_id + str(ts))), ts) - rid = self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router_id + str(tsn)).encode()) - pid_name = 'pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn) - origin = (autonomous_system, domain_id, area_id, router_id) - if pid_name not in self.props: - self.props[pid_name] = [] - self.props[pid_name].append(origin) - - @staticmethod - def split_router_ids(router_id: str): - """some router ids come without IP format. ie.e without dots in it - convert these router_ids to IPs""" - router_id = str(router_id) - if '.' in router_id: - return router_id - router_groups = re.findall('...', router_id) - no_zero_groups = [] - for group in router_groups: - if group.startswith('00'): - no_zero_groups.append(group[2:]) - elif group.startswith('0'): - no_zero_groups.append(group[1:]) - else: - no_zero_groups.append(group) - return '.'.join(no_zero_groups) - - def _get_router_id_from_node_descript_list(self, node_descriptors, key: str): - return [self.split_router_ids(descriptor.get(key)) for descriptor in node_descriptors] - - def load_topology(self, lsa): - if lsa.get('ls-nlri-type') == 'bgpls-link': - # Link information - src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id') - dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id') - for i, j in zip(src, dst): - self.topology.add_edge(i, j) - if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4': - # ToDo verify if prefix info is needed and not already provided by node-descriptors - # Node information. 
Groups origin with its prefixes - origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id") - prefix = self.split_router_ids(lsa['ip-reach-prefix']) - for item in origin: - if item not in self.topology.nodes(): - self.topology.add_node(item) - if 'prefixes' not in self.topology.nodes[item]: - self.topology.nodes[item]['prefixes'] = [] - self.topology.nodes[item]['prefixes'].append(prefix) - if 'node-descriptors' in lsa: - # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4 - # add node to topology if not present - node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id') - for node_descriptor in node_descriptors: - if node_descriptor not in self.topology.nodes(): - self.topology.add_node(node_descriptor) - - def load_pid_prop(self, lsa, ls_area_id): - if 'node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id) - if 'local-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id) - if 'remote-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id) - - ''' - Modificación 1.1: - En vez de utilizar los PIDs directamente se realiza un hash sha3 del router_id (su IP) con un salt (El ASN) - Modificación 1.2: - Utilizamos un timestamp con precisión de microsegundos para "sazonar" el hash. - Para mantener una coherencia con las veces previas se realizará un mapeado IP:timestamp la primera vez que se mapee el nodo. - Si un nodo se cae no tendrá problemas de duplicidad al no depender esto del grafo. - ''' - def load_pids(self, ipv4db): - # self.pids stores the result of networkmap - for rr_bgp in [RR_BGP_0]: - for prefix, data in ipv4db[rr_bgp]['ipv4'].items(): - #pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop'])) - if self.get_hex_id(data['next-hop']) not in self.ts.keys(): - tsn = int(datetime.timestamp(datetime.now())*1000000) - self.ts[self.get_hex_id(data['next-hop'])] = tsn - else: - tsn = self.ts[self.get_hex_id(data['next-hop'])] - hash_r = hashlib.sha3_384((data['next-hop'] + str(tsn)).encode()) - pid_name = 'pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn) - if pid_name not in self.pids: - self.pids[pid_name] = [] - if prefix not in self.pids[pid_name]: - self.pids[pid_name].append(prefix) - - def compute_costmap(self): - # shortest_paths is a dict by source and target that contains the shortest path length for - # that source and destination - shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology)) - for src in self.pids: - sp = self.props.get(src, [(0, None)])[0][-1] - self.cost_map[src] = dict() - for dst in self.pids: - dp = self.props.get(dst, [(0, None)])[0][-1] - if sp is not None and dp is not None: - self.cost_map[src][dst] = shortest_paths.get(sp, {}).get(dp, 64) - - def manage_bgp_speaker_updates(self): - """ - Reads stdout of process exabgp. It reads line by line - Decoded update messages from exabgp are used to build the netwokmap and costmap - :return: - """ - pids_to_load = {RR_BGP_0: {'ipv4': {}}} - mapa_aux={} - while True: - line = self.exabgp_process.stdout.readline().strip() - if b'decoded UPDATE' in line and b'json' in line: - #print(f"LINEA!!!! 
{line.split(b'json')}") - decode_line = json.loads(line.split(b'json')[1]) - neighbor_ip_address = decode_line['neighbor']['address']['peer'] - update_msg = decode_line['neighbor']['message']['update'] - if 'announce' in update_msg: - is_bgp_ls = update_msg['announce'].get('bgp-ls bgp-ls') - is_bgp = update_msg['announce'].get('ipv4 unicast') - if 'attribute' in update_msg: - #Aquà tenemos que ver cómo incluir los mapas multicostes. - ls_area_id = update_msg['attribute'].get('bgp-ls', {}).get('area-id', 0) - if is_bgp_ls: - for next_hop_address, nlri in is_bgp_ls.items(): - for prefix in nlri: - self.load_topology(prefix) - self.load_pid_prop(prefix, ls_area_id) - elif is_bgp: - for next_hop, prefix in is_bgp.items(): - for nlri in prefix: - pids_to_load[neighbor_ip_address]['ipv4'][nlri['nlri']] = {'next-hop': next_hop} - self.load_pids(pids_to_load) - elif 'withdraw' in update_msg and 'bgp-ls bgp-ls' in update_msg['withdraw']: - for route in update_msg['withdraw']['bgp-ls bgp-ls']: - u=0;v=0 - for field, values in route.items(): - if field == "local-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - u=j - elif field == "remote-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - v=j - if u != 0 and v != 0: - try: - self.topology.remove_edge(self.split_router_ids(u), self.split_router_ids(v)) - except: - print("Eje ya removido.") - - self.compute_costmap() - self.topology_writer.write_pid_file(self.pids) - self.topology_writer.write_cost_map(self.cost_map) - - if bool(self.cost_map) : - self.kafka_p.envio_alto('alto-costes', self.cost_map) - - #icode = self.kafka_p.envio_alto_archivo('alto-costes', self.topology_writer.output_path, "cost_map.json") - #if not icode : - # print("No se ha podido realizar la escritura") - #else: - # print("Escritura correcta") - #self.kafka_p.envio_alto("alto-costes", self.cost_map) - #self.kafka_p.envio_alto("alto-pids", self.pids) - def shortest_path(graph): - return dict(networkx.dijkstra_path(graph)) - - def all_maps(topo, src, dst): - ''' - Returns all the diferent paths between src and dest without any edge in common. - The result is a list of paths (each path is represented as a char list, e.g. 
['a', 'c', 'd'])
-        Args:
-            topo: Topology map
-            src: node used as source
-            dst: node used as destination
-        '''
-        map_aux = networkx.MultiGraph(topo)
-        all_paths = []
-
-        sh_path = networkx.dijkstra_path(map_aux, src, dst)
-        while sh_path != []:
-
-            nodo_s = sh_path[0]
-            for nodo_d in sh_path[1:]:
-                map_aux.remove_edge(nodo_s, nodo_d)
-                nodo_s = nodo_d
-
-            all_paths.append([sh_path])
-            sh_path = networkx.dijkstra_path(map_aux, src, dst)
-
-        return all_paths
-
-
-'''
-Next to do:
-    - Code that uses metrics other than the number of hops (weighted edges or something like that)
-    - Evaluate the functionality once the environment is working
-'''
-
-
-
-class TopologyFileWriter:
-
-    def __init__(self, output_path):
-        self.output_path = output_path
-        self.pid_file = 'pid_file.json'
-        self.cost_map_file = 'cost_map.json'
-
-    def write_file(self, file_name, content_to_write):
-        """Writes file_name in output_file"""
-        full_path = os.path.join(self.output_path, file_name)
-        with open(full_path, 'w') as out_file:
-            json.dump(content_to_write, out_file, indent=4)
-
-    def write_pid_file(self, content):
-        self.write_file(self.pid_file, content)
-
-    def write_cost_map(self, content):
-        self.write_file(self.cost_map_file, content)
-
-
-if __name__ == '__main__':
-
-    speaker_bgp = ManageBGPSpeaker()
-    exabgp_process = speaker_bgp.check_tcp_connection()
-
-    topology_creator = TopologyCreator(exabgp_process)
-    topology_creator.manage_bgp_speaker_updates()
-
-
-
-"""
-    #print("N:\t", n)
-    for i, j in n.items():
-        if i == "router-id":
-            print("IDs:\t", self.split_router_ids(j))
-    #try:
-        #self.topology.remove_node(self.split_router_ids(j))
-    #except:
-    #    print("Node already removed.")
-"""
-
diff --git a/src/alto/service/desuso/zexhibitor_sucio b/src/alto/service/desuso/zexhibitor_sucio
deleted file mode 100644
index fea3d7edb9db5db26d2cc76ec1ce0bab59ac6cfc..0000000000000000000000000000000000000000
--- a/src/alto/service/desuso/zexhibitor_sucio
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/bin/bash
-: '
-Current progress:
-    We can already access the information dynamically
-Still to do:
-    Fix the Python code so it does not crash every time a node fails
-    Synchronize with a Kafka queue
-    Kafka handling so that someone can read it
-
-Extras that could be done:
-    Write the ALTO client code that will read it
-    Increase the number of available maps.
-'
-
-
-function actualizar_kafka(){
-    : # placeholder body: publish the updated map to the Kafka queue
-}
-
-
-
-function monitorizar_fichero_incremental(){
-    # Watch for modifications to the maps
-    while inotifywait -e modify $1 1>/dev/null; do
-        echo "Changes to $1:"
-        diff $1 $2
-        cp $1 $2
-        # Update topic $1 on the Kafka queue
-
-    done
-}
-
-
-function monitorizar_fichero(){
-    # Watch for modifications to the maps
-    while inotifywait -e modify $1 >/dev/null; do
-        if [[ "$(cat $1)" != "$(cat $2)" ]] ; then
-            echo $1
-            cp $1 $2
-            actualizar_kafka $1
-        fi
-    done
-}
-
-
-# Create one temporary file per map
-TEMP0=$(mktemp ".tmp.XXXXXXXXXX")
-echo "temp:$TEMP0"
-cp cost_map.json $TEMP0
-
-TEMP1=$(mktemp ".tmp.XXXXXXXXXX")
-echo "temp:$TEMP1"
-cp pid_file.json $TEMP1
-
-
-# run isis.py in the background and store its pid in $PID1
-python3.7 cdn-alto/alto/topology_maps_generator_isis.py 1>/dev/null 2>>houston.log &
-PID1=$!
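As a reading aid for the retired zexhibitor_sucio watcher being assembled above: its inotifywait-plus-Kafka loop can be sketched in Python, the language the rest of this service uses. A minimal sketch under stated assumptions: plain mtime polling stands in for inotify, and publish() is a hypothetical stand-in for the service's AltoProducer.envio_alto; the 'alto-costes' and 'alto-pids' topic names come from the deleted sources.

import json
import os
import time

WATCHED = {'cost_map.json': 'alto-costes', 'pid_file.json': 'alto-pids'}

def publish(topic, payload):
    # hypothetical stand-in for AltoProducer.envio_alto
    print('publish %s: %d bytes' % (topic, len(json.dumps(payload))))

def watch(paths, interval=1.0):
    """Re-publish a map whenever its file's mtime changes (polling, not inotify)."""
    last_seen = {}
    while True:  # runs until interrupted, like the shell watcher it replaces
        for path, topic in paths.items():
            try:
                mtime = os.stat(path).st_mtime
            except FileNotFoundError:
                continue  # map not written yet
            if mtime != last_seen.get(path):
                last_seen[path] = mtime
                with open(path) as f:
                    publish(topic, json.load(f))
        time.sleep(interval)

watch(WATCHED)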
-echo "PID:$PID1"
-
-# If this process is killed, kill the children and remove the temp files
-trap 'echo END;kill $PID1;rm $TEMP0;rm $TEMP1;exit' SIGINT SIGKILL
-
-# Monitor the files of interest
-monitorizar_fichero "cost_map.json" $TEMP0 &
-monitorizar_fichero "pid_file.json" $TEMP1 &
-
-
-while true; do sleep 1; done
-
-
-# Leftover unused code
-: '
-# Watch for modifications to the maps
-while inotifywait -e modify cost_map.json; do
-    echo "Changes:"
-    diff cost_map.json $TEMP0
-    cp cost_map.json $TEMP0
-done
-
-
-
-function temporales(){
-    # Create one temporary file per map
-    TEMP$i=$(mktemp ".tmp.XXXXXXXXXX")
-    echo "temp:${TEMP$i}"
-    $i=$i+1
-    return ${TEMP$i}
-}
-
-
-
-'
-
-echo END
diff --git a/src/alto/service/emergencia/topology_maps_generator.py b/src/alto/service/emergencia/topology_maps_generator.py
deleted file mode 100644
index f1a0b9ba4d899e9b2f7e6a33ee47aeb73167b4b8..0000000000000000000000000000000000000000
--- a/src/alto/service/emergencia/topology_maps_generator.py
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-import json
-import re
-import networkx
-import socket
-import struct
-import hashlib
-
-from time import sleep
-from datetime import datetime
-sys.path.append('cdn-alto/')
-from bgp.manage_bgp_speaker import ManageBGPSpeaker
-sys.path.append('alto-ale/')
-from kafka_ale.kafka_api import AltoProducer
-#from api_pybatfish import BatfishManager
-from yang_alto import RespuestasAlto
-from ipaddress import ip_address, IPv4Address
-
-DEFAULT_ASN = 0
-RR_BGP_0 = "50.50.50.1"
-#RR_BGP = BGP_INFO['bgp']['ip']
-
-
-class TopologyCreator:
-
-    def __init__(self, exabgp_process, mode):
-        self.exabgp_process = exabgp_process
-        self.props = {}
-        self.pids = {}
-        self.topology = networkx.Graph()
-        self.cost_map = {}
-        self.router_ids = []
-        # set path where to write result json files
-        self.topology_writer = TopologyFileWriter('/root/')
-        if mode:
-            self.kafka_p = AltoProducer("localhost", "9092")
-            #self.kafka_p = AltoProducer("localhost", "9093")
-        self.ts = {}
-        #self.bfm = BatfishManager()
-        self.vtag = 0
-        self.resp = RespuestasAlto()
-
-    ### Static Methods
-
-    @staticmethod
-    def discard_message_from_protocol_id(message, discard_protocols):
-        """Discard message if protocol is inside discard_protocols list"""
-        return message["protocol-id"] in discard_protocols
-
-    @staticmethod
-    def get_hex_id(ip):
-        """Get hexadecimal value for certain IP
-        :param: ip string"""
-        return ''.join(['%02x' % int(w) for w in ip.split('.')])
-
-    @staticmethod
-    def check_is_hex(hex_value):
-        try:
-            int(hex_value, 16)
-            return True
-        except ValueError:
-            return False
-
-    @staticmethod
-    def split_router_ids(router_id: str):
-        """some router ids come without IP format, i.e. without dots in them;
-        convert these router_ids to IPs"""
-        router_id = str(router_id)
-        if '.'
in router_id: - return router_id - router_groups = re.findall('...', router_id) - no_zero_groups = [] - for group in router_groups: - if group.startswith('00'): - no_zero_groups.append(group[2:]) - elif group.startswith('0'): - no_zero_groups.append(group[1:]) - else: - no_zero_groups.append(group) - return '.'.join(no_zero_groups) - - @staticmethod - def check_if_router_id_is_hex(router_id): - return router_id.isnumeric() - - @staticmethod - def hex_to_ip(hex_ip): - hex_ip = hex_ip.strip("0") - addr_long = int(hex_ip, 16) & 0xFFFFFFFF - struct.pack("<L", addr_long) - return socket.inet_ntoa(struct.pack("<L", addr_long)) - - @staticmethod - def reverse_ip(reversed_ip): - l = reversed_ip.split(".") - return '.'.join(l[::-1]) - - - - ### Auxiliar methods - - def ip_type(self, prefix): - ip=prefix.split("/")[0] - return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6" - - def obtain_pid(self, router): - """Returns the hashed PID of the router passed as argument. - If the PID was already mapped, it uses a dictionary to access to it. - """ - tsn = int(datetime.timestamp(datetime.now())*1000000) - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router + str(tsn)).encode()) - return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn)) - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. - with AS number + domain_id + area_id + hexadecimal router_id - """ - routers_id = [] - desc = lsa[descriptors] - for item in desc: - if "router-id" in item: - routers_id.append(item["router-id"]) - autonomous_systems = [item.get("autonomous-system") for item in desc] - domain_ids = [item.get("domain-id", 0) for item in desc] - for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id) - #pid_name = self.obtain_pid(router_id) - origin = (autonomous_system, domain_id, area_id, router_id) - if pid_name not in self.props: - self.props[pid_name] = [] - self.props[pid_name].append(origin) - - def _get_router_id_from_node_descript_list(self, node_descriptors, key: str): - result = [] - for descriptor in node_descriptors: - for key_d, value in descriptor.items(): - if key_d == key: - #print(value, key_d) - if self.check_if_router_id_is_hex(value): - result.append(self.split_router_ids(value)) - elif "." in value: - result.append(value) - else: - result.append(self.reverse_ip(self.hex_to_ip(value))) - return result - - def parseo_yang(self, mensaje, tipo): - return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}' - - - - ### Topology generation and information recopilation functions - - def load_topology(self, lsa, igp_metric): - if lsa.get('ls-nlri-type') == 'bgpls-link': - # Link information - src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id') - dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id') - for i, j in zip(src, dst): - self.topology.add_edge(i, j, weight=igp_metric) - if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4': - # ToDo verify if prefix info is needed and not already provided by node-descriptors - # Node information. 
Groups origin with its prefixes - origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id") - prefix = self.split_router_ids(lsa['ip-reach-prefix']) - for item in origin: - if item not in self.topology.nodes(): - self.topology.add_node(item) - if 'prefixes' not in self.topology.nodes[item]: - self.topology.nodes[item]['prefixes'] = [] - self.topology.nodes[item]['prefixes'].append(prefix) - if lsa.get('ls-nlri-type') == "bgpls-node": - # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4 - # add node to topology if not present - node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id') - self.router_ids.append(node_descriptors) - for node_descriptor in node_descriptors: - if node_descriptor not in self.topology.nodes(): - self.topology.add_node(node_descriptor) - - def load_pid_prop(self, lsa, ls_area_id): - if 'node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id) - if 'local-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id) - if 'remote-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id) - - def load_pids(self, ipv4db): - # self.pids stores the result of networkmap - for rr_bgp in [RR_BGP_0]: - for prefix, data in ipv4db[rr_bgp]['ipv4'].items(): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop'])) - #pid_name = self.obtain_pid(data['next-hop']) - tipo=self.ip_type(prefix) - if pid_name not in self.pids: - self.pids[pid_name] = {} - if tipo not in self.pids[pid_name]: - self.pids[pid_name][tipo]=[] - if prefix not in self.pids[pid_name][tipo]: - self.pids[pid_name][tipo].append(prefix) - - def compute_costmap(self): - # shortest_paths is a dict by source and target that contains the shortest path length for - # that source and destination - shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology)) - for src, dest_pids in shortest_paths.items(): - src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src)) - #src_pid_name = self.obtain_pid(src) - for dest_pid, weight in dest_pids.items(): - dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid)) - #dst_pid_name = self.obtain_pid(dest_pid) - if src_pid_name not in self.cost_map: - self.cost_map[src_pid_name] = {} - self.cost_map[src_pid_name][dst_pid_name] = weight - - - - ### RFC7285 functions - def get_costs_map_by_pid(self, pid): - #pid = "pid0:" + str(npid) - #print(pid) - #print(str(self.pids)) - if pid in self.cost_map.keys(): - #print(str(self.pids)) - #print(str(self.cost_map)) - return self.resp.crear_respuesta("filtro", "networkmap-default", self.vtag, str(self.cost_map[pid])) - else: - return "404: Not Found" - - def get_properties(self, pid): - #return str(self.bf.session.q.nodeProperties().answer().frame()) - return "Implementation in proccess. Sorry dude" - - def get_endpoint_costs(self, pid): - return "Implementation in proccess. 
Sorry dude" - - def get_maps(self): - return ('{"pids_map":' + self.get_pids() + ', "costs_map":' + self.get_costs_map() + '}') - - def get_costs_map(self): - return self.resp.crear_respuesta("cost-map", "networkmap-default", self.vtag, str(self.cost_map)) - - def get_pids(self): - return self.resp.crear_respuesta("pid-map", "networkmap-default", self.vtag, str(self.pids)) - - def get_directory(self): - return self.resp.indice() - - ### Ampliation functions - - def shortest_path(self, a, b): - try: - return networkx.dijkstra_path(self.topology, a, b) - except networkx.exception.NetworkXNoPath as e: - return [] - except Exception as e: - print(e) - return (-1) - - def all_maps(self, topo, src, dst): - ''' - Returns all the diferent paths between src and dest without any edge in common. - The result is a list of paths (each path is represented as a char list, e.g. ['a', 'c', 'd']) - Args: - topo: Topology map - src: node used as source - dst: node used as destination - ''' - map_aux = networkx.Graph(topo) - all_paths = [] - - sh_path = networkx.dijkstra_path(map_aux, src, dst) - while sh_path != []: - cost = 0 - nodo_s = sh_path[0] - for nodo_d in sh_path[1:]: - map_aux.remove_edge(nodo_s, nodo_d) - nodo_s = nodo_d - cost = cost + 1 - - all_paths.append({'path':sh_path, 'cost':cost}) - try: - sh_path = networkx.dijkstra_path(map_aux, src, dst) - except networkx.exception.NetworkXNoPath as e: - sh_path = [] - return all_paths - - - - ### Manager function - - def manage_bgp_speaker_updates(self, mode): - """ - Reads stdout of process exabgp. It reads line by line - Decoded update messages from exabgp are used to build the netwokmap and costmap - :return: - """ - pids_to_load = {RR_BGP_0: {'ipv4': {}}} - while True: - line = self.exabgp_process.stdout.readline().strip() - if b'decoded UPDATE' in line and b'json' in line: - #print(line) - self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64] - decode_line = json.loads(line.split(b'json')[1]) - neighbor_ip_address = decode_line['neighbor']['address']['peer'] - update_msg = decode_line['neighbor']['message']['update'] - if 'announce' in update_msg: - is_bgp_ls = update_msg['announce'].get('bgp-ls bgp-ls') - is_bgp = update_msg['announce'].get('ipv4 unicast') - if 'attribute' in update_msg: - ls_area_id = update_msg['attribute'].get('bgp-ls', {}).get('area-id', 0) - igp_metric = update_msg['attribute'].get('bgp-ls', {}).get("igp-metric", 1) - if is_bgp_ls: - for next_hop_address, nlri in is_bgp_ls.items(): - for prefix in nlri: - if self.discard_message_from_protocol_id(prefix, [4, 5]): - continue - self.load_topology(prefix, igp_metric) - self.load_pid_prop(prefix, ls_area_id) - elif is_bgp: - for next_hop, prefix in is_bgp.items(): - for nlri in prefix: - pids_to_load[neighbor_ip_address]['ipv4'][nlri['nlri']] = {'next-hop': next_hop} - self.load_pids(pids_to_load) - - elif 'withdraw' in update_msg and 'bgp-ls bgp-ls' in update_msg['withdraw']: - for route in update_msg['withdraw']['bgp-ls bgp-ls']: - u=0;v=0 - for field, values in route.items(): - if field == "local-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - u=j - elif field == "remote-node-descriptors": - for n in values: - for i, j in n.items(): - if i == "router-id": - v=j - if u != 0 and v != 0: - try: - self.topology.remove_edge(self.split_router_ids(u), self.split_router_ids(v)) - except: - print("Eje ya removido.") - - self.compute_costmap() - 
self.topology_writer.write_same_ips(self.router_ids)
-            self.topology_writer.write_pid_file(self.pids)
-            self.topology_writer.write_cost_map(self.cost_map)
-
-            if bool(self.cost_map):
-                if mode:
-                    self.kafka_p.envio_alto('alto-costes', self.cost_map, 0)
-
-    def manage_ietf_speaker_updates(self):
-        '''
-        Receives topology information from the PCE over the Southbound Interface and creates/updates the graphs.
-        Performs an iterative analysis, reviewing each network: if two networks are the same but arrive via different protocols, they must be merged.
-        Three attributes on each network: dic[ips], dic[interfaces] and graph[links]
-        '''
-        # Dictionary node-id:name
-        nodos = {}
-        # Dictionary node-id:[(interface, ip)]
-        tps = {}
-        # List of links
-        links = []
-        full_path = os.path.join("/root/", "ietf_prueba.json")
-        with open(full_path, 'r') as archivo:
-            self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64]
-            #while True:
-            deluro = archivo.read()
-            d_json = json.loads(str(deluro))
-            #print("Type = " + str(type(d_json)) + "\nMessage = " + str(d_json))
-            ietf_networks = d_json["ietf-network:networks"]
-            if ietf_networks == '':
-                return
-            # Build a dictionary with all the networks present and walk it looking for the valid ones
-            for net in ietf_networks["network"]:
-                if "node" in net.keys() and "ietf-network-topology:link" in net.keys():
-                    for nodo in net["node"]:
-                        # Match each node ID to its name and prefix(es).
-                        nodos[nodo["node-id"]] = nodo["ietf-l3-unicast-topology:l3-node-attributes"]["name"]
-                        tps[nodo["node-id"]] = []
-                        if "ietf-network-topology:termination-point" in nodo.keys():
-                            for tp in nodo["ietf-network-topology:termination-point"]:
-                                tps[nodo["node-id"]].append(str(nodos[nodo["node-id"]]) + ' ' + str(tp["tp-id"]))
-                        pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(nodo["node-id"]))
-                        if pid_name not in self.pids:
-                            self.pids[pid_name] = {}
-                        if 'ipv4' not in self.pids[pid_name]:
-                            self.pids[pid_name]['ipv4'] = []
-                        if nodo['node-id'] not in self.pids[pid_name]['ipv4']:
-                            self.pids[pid_name]['ipv4'].append(nodo['node-id'])
-                        self.topology.add_node(nodo['node-id'])
-
-                    # Still to do: list the links and store them.
-                    for link in net["ietf-network-topology:link"]:
-                        a, b = link["link-id"].split(" - ")
-                        if a == '' or b == '':
-                            break
-                        a1 = a.split(' ')[0]
-                        b1 = b.split(' ')[0]
-                        for k in nodos.keys():
-                            if nodos[k] == a1:
-                                a = k
-                            elif nodos[k] == b1:
-                                b = k
-                        links.append(((a, b), link["ietf-l3-unicast-topology:l3-link-attributes"]["metric1"]))
-
-            # Once everything works, store this in a graph instead of dictionaries. -> The nodes can already be added above.
-            # Everything runs correctly right now; still pending: translate a,b to PIDs instead of node-ids.
-            for link in links:
-                self.topology.add_edge(link[0][0], link[0][1], weight=int(link[1]))
-
-            # We still need to review which dictionaries remain necessary.
-            # Since BGP represents it as node-id - node-id, it may be important to unify the representation shown. (done)
-            # What do we do with the interfaces? Do we show them on the edges, or is that unnecessary? Do we keep a list of links showing how they connect?
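As a reading aid for compute_costmap in the deleted module above: the cost-map derivation reduces to a single all-pairs Dijkstra pass in networkx. A minimal sketch with made-up router IDs and IGP weights; get_hex_id mirrors the static helper in the deleted code:

import networkx

DEFAULT_ASN = 0

def get_hex_id(ip):
    # hexadecimal form of a dotted-quad router ID, as in TopologyCreator.get_hex_id
    return ''.join('%02x' % int(w) for w in ip.split('.'))

# Made-up topology: three routers in a line, IGP metrics 10 and 20.
topology = networkx.Graph()
topology.add_edge('1.1.1.1', '2.2.2.2', weight=10)
topology.add_edge('2.2.2.2', '3.3.3.3', weight=20)

cost_map = {}
for src, dests in networkx.all_pairs_dijkstra_path_length(topology):
    src_pid = 'pid%d:%s' % (DEFAULT_ASN, get_hex_id(src))
    cost_map[src_pid] = {
        'pid%d:%s' % (DEFAULT_ASN, get_hex_id(dst)): weight
        for dst, weight in dests.items()
    }

# e.g. cost_map['pid0:01010101']['pid0:03030303'] == 30
print(cost_map)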
- print("Done") - self.compute_costmap() - self.topology_writer.write_same_ips(self.router_ids) - self.topology_writer.write_pid_file(self.pids) - self.topology_writer.write_cost_map(self.cost_map) - print(str(self.get_maps())) - - - -class TopologyFileWriter: - - def __init__(self, output_path): - self.output_path = output_path - self.pid_file = 'pid_file.json' - self.cost_map_file = 'cost_map.json' - self.same_node_ips = "router_ids.json" - - def write_file(self, file_name, content_to_write): - """Writes file_name in output_file""" - full_path = os.path.join(self.output_path, file_name) - with open(full_path, 'w') as out_file: - json.dump(content_to_write, out_file, indent=4) - - def write_pid_file(self, content): - self.write_file(self.pid_file, content) - - def write_cost_map(self, content): - self.write_file(self.cost_map_file, content) - - def write_same_ips(self, content): - self.write_file(self.same_node_ips, content) - - - -if __name__ == '__main__': - speaker_bgp = ManageBGPSpeaker() - exabgp_process = speaker_bgp.check_tcp_connection() - - topology_creator = TopologyCreator(exabgp_process,0) - topology_creator.manage_ietf_speaker_updates() diff --git a/src/alto/service/exponsure.py b/src/alto/service/exponsure.py deleted file mode 100644 index 3ece3b0ecd4bd5929243c4c7e60c87063abac2cf..0000000000000000000000000000000000000000 --- a/src/alto/service/exponsure.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python3 - - -import os -import sys -import json -import threading -import flask -from time import sleep -sys.path.append('cdn-alto/') -from bgp.manage_bgp_speaker import ManageBGPSpeaker -sys.path.append('alto-ale/') -from topology_maps_generator import TopologyCreator -from topology_maps_generator import TopologyFileWriter -from modulos.topology_bgp import TopologyBGP -from modulos.topology_ietf import TopologyIetf - - - -class HiloHTTP(threading.Thread): - - def __init__(self): - threading.Thread.__init__(self) - #self.tc = topology_creator - #Código global - #app = flask.Flask(__name__) - #app.config["DEBUG"] = True - - def run (self): - alto.manage_bgp_speaker_updates(0) - #alto.mailbox(8888) - #self.app.run() - -app = flask.Flask(__name__) -app.config["DEBUG"] = True - -@app.route('/', methods=['GET']) -def home(): - return ''' - <h1>API DE ACCESO AL SERVICE ALTO DE PRUEBAS</h1> - <h2>Servicios disponibles:</h2> - <p><ul> - <li>Todos los camimos disjuntos entre A y B: <b><tt> /all/<string:a>/<string:b> </b></tt></li> - <li>Camino más corto entre A y B: <b><tt> /best/<string:a>/<string:b> </b></tt></li> - <li>Mapa de costes: /costs </li> - <li>Mapa de PIDs: /pids </li> - </ul></p> - - - ''' - -################################### -## ## -# Services defined in RFC 7285 # -## ## -################################### - -# Map-Filteriong Service -@app.route('/costmap/filter/<string:pid>', methods=['GET']) -def api_costs_by_pid(pid): - return flask.jsonify(alto.get_costs_map_by_pid(pid)) - -#Endpoint Property Service -@app.route('/properties/<string:pid>', methods=['GET']) -def api_properties(pid): - return flask.jsonify(alto.get_properties(pid)) - -#Endpoint Cost Service -@app.route('/costmap/<string:pid>', methods=['GET']) -def api_endpoint_costs(pid): - return flask.jsonify(alto.get_endpoint_costs(pid)) - -#Map Service -@app.route('/maps', methods=['GET']) -def api_maps(): - return flask.jsonify(alto.get_maps()) - -#Network Map service -@app.route('/costmap', methods=['GET']) -def api_costs(): - return flask.jsonify(alto.get_costs_map()) - -@app.route('/networkmap', 
methods=['GET']) -def api_pids(): - return flask.jsonify(alto.get_pids()) - -@app.route('/directory', methods=['GET']) -def api_directory(): - return flask.jsonify(alto.get_directory()) - - -################################### -## ## -# Ampliations # -## ## -################################### - - -#All possible paths between A and B without any common node -@app.route('/all/<string:a>/<string:b>', methods=['GET']) -def api_all(a,b): - return flask.jsonify(alto.parseo_yang(str(alto.all_maps(alto.topology, a, b)),"all-paths")) - -#Best path between A and B -@app.route('/best/<string:a>/<string:b>', methods=['GET']) -def api_shortest(a,b): - return flask.jsonify(str(alto.shortest_path(a, b))) - -if __name__ == '__main__': - #Creation of ALTO modules - '''modules={} - modules['bgp'] = TopologyBGP(('localhost',8888)) - #modules['ietf'] = TopologyIetf(('localhost',8081)) - alto = TopologyCreator(modules, 0) - hilos = alto.lanzadera() - - hilo = HiloHTTP() - hilo.start() - hilos.append(hilo) - sleep(30) - alto.get_costs_map() - ''' - -speaker_bgp = ManageBGPSpeaker() -exabgp_process = speaker_bgp.check_tcp_connection() -alto = TopologyCreator(exabgp_process,0) -hilo = HiloHTTP() -hilo.start() -#app.run(host='192.168.165.193', port=8080) -app.run() diff --git a/src/alto/service/ietf2_prueba.json b/src/alto/service/ietf2_prueba.json deleted file mode 100644 index 154266906b5138fed966c494d5e931283e08a955..0000000000000000000000000000000000000000 --- a/src/alto/service/ietf2_prueba.json +++ /dev/null @@ -1,1952 +0,0 @@ -{ - "ietf-network:networks": { - "network": [ - { - "network-id": "0 : 0 : 0", - "network-types": { - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "ietf-network-topology:link": [ - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/0 - ATN950C-2_HL5-3-1 GigabitEthernet0/2/4" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/10 - NE40X2-1_HL4-2-1 50|100GE0/1/0" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/2 - NE40X8-3_HL4 GigabitEthernet4/0/1" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/8 - ATN950C-1_HL5-1-1 Ethernet0/0/0" - }, - { - "link-id": "ATN950C-1_HL5-1-1 Ethernet0/0/0 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/8" - }, - { - "link-id": "ATN950C-2_HL5-3-1 GigabitEthernet0/2/4 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/0" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/0 - HL2-2-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/1 - HL2-3-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/2 - HL2-1-2 ge-0/0/2" - }, - { - "link-id": "HL2-1-2 ge-0/0/0 - HL2-2-2 ge-0/0/0" - }, - { - "link-id": "HL2-1-2 ge-0/0/1 - HL2-3-2 ge-0/0/1" - }, - { - "link-id": "HL2-1-2 ge-0/0/2 - HL2-1-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/0 - HL2-1-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/1 - HL2-4-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/2 - HL2-2-2 ge-0/0/2" - }, - { - "link-id": "HL2-2-2 ge-0/0/0 - HL2-1-2 ge-0/0/0" - }, - { - "link-id": "HL2-2-2 ge-0/0/1 - HL2-4-2 ge-0/0/1" - }, - { - "link-id": "HL2-2-2 ge-0/0/2 - HL2-2-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/0 - HL2-4-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/1 - HL2-1-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/2 - HL2-3-2 ge-0/0/2" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/3 - NE40X8-1_HL3 GigabitEthernet4/0/2" - 
}, - { - "link-id": "HL2-3-2 ge-0/0/0 - HL2-4-2 ge-0/0/0" - }, - { - "link-id": "HL2-3-2 ge-0/0/1 - HL2-1-2 ge-0/0/1" - }, - { - "link-id": "HL2-3-2 ge-0/0/2 - HL2-3-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/0 - HL2-3-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/1 - HL2-2-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/2 - HL2-4-2 ge-0/0/2" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/3 - HL3-2-2 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-2 ge-0/0/0 - HL2-3-2 ge-0/0/0" - }, - { - "link-id": "HL2-4-2 ge-0/0/1 - HL2-2-2 ge-0/0/1" - }, - { - "link-id": "HL2-4-2 ge-0/0/2 - HL2-4-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-2 ge-0/0/4 - HL3-2-2 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/1 - HL2-4-2 ge-0/0/4" - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/2 - HL2-4-1 GigabitEthernet0/0/0/3" - }, - { - "link-id": "NE40X2-1_HL4-2-1 50|100GE0/1/0 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/10" - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/1 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/2" - } - ] - }, - { - "network-id": "0 : 0 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "1.1.1.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-1", - "router-id": ["1.1.1.3"], - "prefix": [ - { - "prefix": "1.1.1.3/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "3.3.3.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-1_HL3", - "router-id": ["3.3.3.1"], - "prefix": [ - { - "prefix": "3.3.3.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.43.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.35.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/2.52", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.52.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "3.3.3.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-2_HL3", - "router-id": ["3.3.3.2"], - "prefix": [ - { - "prefix": "3.3.3.2/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/1.35", - 
"ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.35.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-3_HL4", - "router-id": ["4.4.4.1"], - "prefix": [ - { - "prefix": "4.4.4.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.43.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1.56", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.56.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/6.41", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.41.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X2-1_HL4-2-1", - "router-id": ["4.4.4.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "50|100GE0/1/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL5-1-2", - "router-id": ["5.5.5.1"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "5.5.5.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN950C-1_HL5-1-1", - "router-id": ["5.5.5.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "Ethernet0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/1/0.56", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.56.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - 
"node-id": "5.5.5.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN910C-2_HL5-2-1", - "router-id": ["5.5.5.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/2/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/10", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/5.54", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.5.54.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/7.41", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.41.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/8", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN950C-2_HL5-3-1", - "router-id": ["5.5.5.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/1/0.54", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.5.54.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.5", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL5-2-2", - "router-id": ["5.5.5.5"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "5.5.5.6", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "AS7315-30X", - "router-id": ["5.5.5.6"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - } - ], - "ietf-network-topology:link": [ - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/5.54 - ATN950C-2_HL5-3-1 
GigabitEthernet0/1/0.54", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/7.41 - NE40X8-3_HL4 GigabitEthernet4/0/6.41", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "30", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN950C-1_HL5-1-1 GigabitEthernet0/1/0.56 - NE40X8-3_HL4 GigabitEthernet4/0/1.56", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "-1", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN950C-2_HL5-3-1 GigabitEthernet0/1/0.54 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/5.54", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/0.43 - NE40X8-3_HL4 GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/1.35 - NE40X8-2_HL3 GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/2.52 - HL2-3-1 GigabitEthernet0/0/0/3.52", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "-1", - "metric2": "-1", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-2_HL3 GigabitEthernet4/0/1.35 - NE40X8-1_HL3 GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/0.43 - NE40X8-1_HL3 GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/1.56 - ATN950C-1_HL5-1-1 GigabitEthernet0/1/0.56", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/6.41 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/7.41", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "0 : 1 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "3.3.3.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "7750SR-7_3", - "router-id": ["3.3.3.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - 
"ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "to_7750SR7_HL4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.43.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "to_HL2-3-2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.50.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "7750SR-7_4", - "router-id": ["4.4.4.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "to_7750SR7_HL3", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.43.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "7750SR-7_3 to_7750SR7_HL4 - 7750SR-7_4 to_7750SR7_HL3", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "30", - "metric2": "10", - "tefsdn-topology:domain-id": "1", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "7750SR-7_4 to_7750SR7_HL3 - 7750SR-7_3 to_7750SR7_HL4", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "0 : 1111 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "1.1.1.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-1-1", - "router-id": ["1.1.1.1"], - "prefix": [ - { - "prefix": "1.1.1.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.2.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.3.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - 
"ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.11", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.11.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-2-1", - "router-id": ["1.1.1.2"], - "prefix": [ - { - "prefix": "1.1.1.2/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.2.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.4.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.22", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.22.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-1", - "router-id": ["1.1.1.3"], - "prefix": [ - { - "prefix": "1.1.1.3/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.4.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.3.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - 
"level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.33", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.33.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/3.52", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.52.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-4-1", - "router-id": ["1.1.1.4"], - "prefix": [ - { - "prefix": "1.1.1.4/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.4.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.4.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.44", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.4.44.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/3", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-1-2", - "router-id": ["2.2.2.1"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.12.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - 
"level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.13.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.11", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.11.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-2-2", - "router-id": ["2.2.2.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.12.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.24.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.22", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.22.22"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-2", - "router-id": ["2.2.2.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.34.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - 
"level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.13.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.33", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.33.33"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4.50", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.50.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-4-2", - "router-id": ["2.2.2.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.34.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.24.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.44", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.4.44.44"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4.51", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.51.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "3.3.3.5", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL3-2-2", - "router-id": ["3.3.3.5"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - 
"ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.51", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.51.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "7750SR-7_3 to_HL2-3-2 - HL2-3-2 ge-0/0/4.50", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/0.12 - HL2-2-1 GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/1.13 - HL2-3-1 GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/2.11 - HL2-1-2 ge-0/0/2.11", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-2 ge-0/0/0.12 - HL2-2-2 ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "12", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-2 ge-0/0/1.13 - HL2-3-2 ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/0.12 - HL2-1-1 GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/1.24 - HL2-4-1 GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/2.22 - HL2-2-2 ge-0/0/2.22", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "30", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/0.12 - HL2-1-2 ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/1.24 - HL2-4-2 ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - 
"link-id": "HL2-2-2 ge-0/0/2.22 - HL2-2-1 GigabitEthernet0/0/0/2.22", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/0.34 - HL2-4-1 GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/1.13 - HL2-1-1 GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/2.33 - HL2-3-2 ge-0/0/2.33", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/3.52 - NE40X8-1_HL3 GigabitEthernet4/0/2.52", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/0.34 - HL2-4-2 ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/1.13 - HL2-1-2 ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/2.33 - HL2-3-1 GigabitEthernet0/0/0/2.33", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/4.50 - 7750SR-7_3 to_HL2-3-2", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/0.34 - HL2-3-1 GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/1.24 - HL2-2-1 GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/2.44 - HL2-4-2 ge-0/0/2.44", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/0.34 - HL2-3-2 ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": 
"HL2-4-2 ge-0/0/1.24 - HL2-2-2 ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/2.44 - HL2-4-1 GigabitEthernet0/0/0/2.44", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/4.51 - HL3-2-2 GigabitEthernet0/0/0/1.51", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/1.51 - HL2-4-2 ge-0/0/4.51", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "TEST_Cisco", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "['4.4.4.1']", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "Cisco-R3" - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0" - }, - { - "tp-id": "GigabitEthernet0/0/0/1" - }, - { - "tp-id": "GigabitEthernet0/0/0/2" - }, - { - "tp-id": "GigabitEthernet0/0/0/3" - }, - { - "tp-id": "GigabitEthernet0/0/0/4" - }, - { - "tp-id": "GigabitEthernet0/0/0/5" - }, - { - "tp-id": "GigabitEthernet0/0/0/6" - }, - { - "tp-id": "Loopback0" - }, - { - "tp-id": "Loopback76" - }, - { - "tp-id": "MgmtEth0/RP0/CPU0/0" - } - ] - } - ] - } - ] - } -} diff --git a/src/alto/service/ietf_prueba.json b/src/alto/service/ietf_prueba.json deleted file mode 100644 index 81580e985d58e7a26beb37b985fac6d586d92ea7..0000000000000000000000000000000000000000 --- a/src/alto/service/ietf_prueba.json +++ /dev/null @@ -1,1952 +0,0 @@ -{ - "ietf-network:networks": { - "network": [ - { - "network-id": "0 : 0 : 0", - "network-types": { - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "ietf-network-topology:link": [ - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/0 - ATN950C-2_HL5-3-1 GigabitEthernet0/2/4" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/10 - NE40X2-1_HL4-2-1 50|100GE0/1/0" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/2 - NE40X8-3_HL4 GigabitEthernet4/0/1" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/8 - ATN950C-1_HL5-1-1 Ethernet0/0/0" - }, - { - "link-id": "ATN950C-1_HL5-1-1 Ethernet0/0/0 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/8" - }, - { - "link-id": "ATN950C-2_HL5-3-1 GigabitEthernet0/2/4 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/0" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/0 - HL2-2-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/1 - HL2-3-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/2 - HL2-1-2 ge-0/0/2" - }, - { - "link-id": "HL2-1-2 ge-0/0/0 - HL2-2-2 ge-0/0/0" - }, - { - "link-id": "HL2-1-2 ge-0/0/1 - HL2-3-2 ge-0/0/1" - }, - { - "link-id": 
"HL2-1-2 ge-0/0/2 - HL2-1-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/0 - HL2-1-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/1 - HL2-4-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/2 - HL2-2-2 ge-0/0/2" - }, - { - "link-id": "HL2-2-2 ge-0/0/0 - HL2-1-2 ge-0/0/0" - }, - { - "link-id": "HL2-2-2 ge-0/0/1 - HL2-4-2 ge-0/0/1" - }, - { - "link-id": "HL2-2-2 ge-0/0/2 - HL2-2-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/0 - HL2-4-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/1 - HL2-1-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/2 - HL2-3-2 ge-0/0/2" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/3 - NE40X8-1_HL3 GigabitEthernet4/0/2" - }, - { - "link-id": "HL2-3-2 ge-0/0/0 - HL2-4-2 ge-0/0/0" - }, - { - "link-id": "HL2-3-2 ge-0/0/1 - HL2-1-2 ge-0/0/1" - }, - { - "link-id": "HL2-3-2 ge-0/0/2 - HL2-3-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/0 - HL2-3-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/1 - HL2-2-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/2 - HL2-4-2 ge-0/0/2" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/3 - HL3-2-2 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-2 ge-0/0/0 - HL2-3-2 ge-0/0/0" - }, - { - "link-id": "HL2-4-2 ge-0/0/1 - HL2-2-2 ge-0/0/1" - }, - { - "link-id": "HL2-4-2 ge-0/0/2 - HL2-4-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-2 ge-0/0/4 - HL3-2-2 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/1 - HL2-4-2 ge-0/0/4" - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/2 - HL2-4-1 GigabitEthernet0/0/0/3" - }, - { - "link-id": "NE40X2-1_HL4-2-1 50|100GE0/1/0 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/10" - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/1 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/2" - } - ] - }, - { - "network-id": "0 : 0 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "1.1.1.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-1", - "router-id": ["1.1.1.3"], - "prefix": [ - { - "prefix": "1.1.1.3/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "3.3.3.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-1_HL3", - "router-id": ["3.3.3.1"], - "prefix": [ - { - "prefix": "3.3.3.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.43.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.35.3"], - 
"ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/2.52", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.52.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "3.3.3.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-2_HL3", - "router-id": ["3.3.3.2"], - "prefix": [ - { - "prefix": "3.3.3.2/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.35.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-3_HL4", - "router-id": ["4.4.4.1"], - "prefix": [ - { - "prefix": "4.4.4.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.43.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1.56", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.56.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/6.41", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.41.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X2-1_HL4-2-1", - "router-id": ["4.4.4.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "50|100GE0/1/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL5-1-2", - "router-id": ["5.5.5.1"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": 
"5.5.5.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN950C-1_HL5-1-1", - "router-id": ["5.5.5.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "Ethernet0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/1/0.56", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.56.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN910C-2_HL5-2-1", - "router-id": ["5.5.5.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/2/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/10", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/5.54", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.5.54.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/7.41", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.41.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/8", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN950C-2_HL5-3-1", - "router-id": ["5.5.5.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/1/0.54", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.5.54.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.5", - 
"ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL5-2-2", - "router-id": ["5.5.5.5"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "5.5.5.6", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "AS7315-30X", - "router-id": ["5.5.5.6"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - } - ], - "ietf-network-topology:link": [ - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/5.54 - ATN950C-2_HL5-3-1 GigabitEthernet0/1/0.54", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/7.41 - NE40X8-3_HL4 GigabitEthernet4/0/6.41", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN950C-1_HL5-1-1 GigabitEthernet0/1/0.56 - NE40X8-3_HL4 GigabitEthernet4/0/1.56", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN950C-2_HL5-3-1 GigabitEthernet0/1/0.54 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/5.54", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/0.43 - NE40X8-3_HL4 GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/1.35 - NE40X8-2_HL3 GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/2.52 - HL2-3-1 GigabitEthernet0/0/0/3.52", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "-1", - "metric2": "-1", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-2_HL3 GigabitEthernet4/0/1.35 - NE40X8-1_HL3 GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/0.43 - NE40X8-1_HL3 GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/1.56 - ATN950C-1_HL5-1-1 GigabitEthernet0/1/0.56", - 
"ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/6.41 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/7.41", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "0 : 1 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "3.3.3.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "7750SR-7_3", - "router-id": ["3.3.3.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "to_7750SR7_HL4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.43.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "to_HL2-3-2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.50.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "7750SR-7_4", - "router-id": ["4.4.4.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "to_7750SR7_HL3", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.43.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "7750SR-7_3 to_7750SR7_HL4 - 7750SR-7_4 to_7750SR7_HL3", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "7750SR-7_4 to_7750SR7_HL3 - 7750SR-7_3 to_7750SR7_HL4", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "0 : 1111 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "1.1.1.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-1-1", - "router-id": ["1.1.1.1"], - "prefix": [ - { - "prefix": "1.1.1.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - 
"ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.2.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.3.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.11", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.11.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-2-1", - "router-id": ["1.1.1.2"], - "prefix": [ - { - "prefix": "1.1.1.2/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.2.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.4.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.22", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.22.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-1", - "router-id": ["1.1.1.3"], - "prefix": [ - { - "prefix": "1.1.1.3/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - 
"ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.4.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.3.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.33", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.33.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/3.52", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.52.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-4-1", - "router-id": ["1.1.1.4"], - "prefix": [ - { - "prefix": "1.1.1.4/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.4.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.4.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.44", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.4.44.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/3", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - 
"ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-1-2", - "router-id": ["2.2.2.1"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.12.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.13.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.11", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.11.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-2-2", - "router-id": ["2.2.2.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.12.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.24.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.22", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.22.22"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": 
"HL2-3-2", - "router-id": ["2.2.2.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.34.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.13.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.33", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.33.33"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4.50", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.50.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-4-2", - "router-id": ["2.2.2.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.34.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.24.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.44", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.4.44.44"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4", - 
"ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4.51", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.51.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "3.3.3.5", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL3-2-2", - "router-id": ["3.3.3.5"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.51", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.51.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "7750SR-7_3 to_HL2-3-2 - HL2-3-2 ge-0/0/4.50", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/0.12 - HL2-2-1 GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/1.13 - HL2-3-1 GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/2.11 - HL2-1-2 ge-0/0/2.11", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-2 ge-0/0/0.12 - HL2-2-2 ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "12", - "metric2": "12", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-2 ge-0/0/1.13 - HL2-3-2 ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/0.12 - HL2-1-1 GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/1.24 - HL2-4-1 GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": 
{ - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/2.22 - HL2-2-2 ge-0/0/2.22", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/0.12 - HL2-1-2 ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/1.24 - HL2-4-2 ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/2.22 - HL2-2-1 GigabitEthernet0/0/0/2.22", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/0.34 - HL2-4-1 GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/1.13 - HL2-1-1 GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/2.33 - HL2-3-2 ge-0/0/2.33", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/3.52 - NE40X8-1_HL3 GigabitEthernet4/0/2.52", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/0.34 - HL2-4-2 ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/1.13 - HL2-1-2 ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/2.33 - HL2-3-1 GigabitEthernet0/0/0/2.33", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/4.50 - 7750SR-7_3 to_HL2-3-2", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/0.34 - HL2-3-1 GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", 
- "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/1.24 - HL2-2-1 GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/2.44 - HL2-4-2 ge-0/0/2.44", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/0.34 - HL2-3-2 ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/1.24 - HL2-2-2 ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/2.44 - HL2-4-1 GigabitEthernet0/0/0/2.44", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/4.51 - HL3-2-2 GigabitEthernet0/0/0/1.51", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/1.51 - HL2-4-2 ge-0/0/4.51", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "TEST_Cisco", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "['4.4.4.1']", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "Cisco-R3" - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0" - }, - { - "tp-id": "GigabitEthernet0/0/0/1" - }, - { - "tp-id": "GigabitEthernet0/0/0/2" - }, - { - "tp-id": "GigabitEthernet0/0/0/3" - }, - { - "tp-id": "GigabitEthernet0/0/0/4" - }, - { - "tp-id": "GigabitEthernet0/0/0/5" - }, - { - "tp-id": "GigabitEthernet0/0/0/6" - }, - { - "tp-id": "Loopback0" - }, - { - "tp-id": "Loopback76" - }, - { - "tp-id": "MgmtEth0/RP0/CPU0/0" - } - ] - } - ] - } - ] - } -} diff --git a/src/alto/service/index.html b/src/alto/service/index.html deleted file mode 100644 index 41ddd02a67e563119d05694ddb28ae85320b90fd..0000000000000000000000000000000000000000 --- a/src/alto/service/index.html +++ /dev/null @@ -1,12 +0,0 @@ - - <h1>API DE ACCESO AL SERVICE ALTO DE PRUEBAS</h1> - <h2>Servicios disponibles:</h2> - <p><ul> - <li>Todos los camimos disjuntos entre A y B: <b><tt> /all/<string:a>/<string:b> </b></tt></li> - <li>Camino más corto entre A y B: <b><tt> 
/best/<string:a>/<string:b> </b></tt></li> - <li>Mapa de costes: /costs </li> - <li>Mapa de PIDs: /pids </li> - </ul></p> - - - \ No newline at end of file diff --git a/src/alto/service/kafka_ale/LICENSE b/src/alto/service/kafka_ale/LICENSE deleted file mode 100644 index bf5d31e1ca904669a463d73a098bb56ef926d8e9..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/LICENSE +++ /dev/null @@ -1,332 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -------------------------------------------------------------------------------- -This project bundles some components that are also licensed under the Apache -License Version 2.0: - -audience-annotations-0.5.0 -commons-cli-1.4 -commons-lang3-3.12.0 -jackson-annotations-2.13.3 -jackson-core-2.13.3 -jackson-databind-2.13.3 -jackson-dataformat-csv-2.13.3 -jackson-dataformat-yaml-2.13.3 -jackson-datatype-jdk8-2.13.3 -jackson-datatype-jsr310-2.13.3 -jackson-jaxrs-base-2.13.3 -jackson-jaxrs-json-provider-2.13.3 -jackson-module-jaxb-annotations-2.13.3 -jackson-module-scala_2.13-2.13.3 -jackson-module-scala_2.12-2.13.3 -jakarta.validation-api-2.0.2 -javassist-3.27.0-GA -jetty-client-9.4.48.v20220622 -jetty-continuation-9.4.48.v20220622 -jetty-http-9.4.48.v20220622 -jetty-io-9.4.48.v20220622 -jetty-security-9.4.48.v20220622 -jetty-server-9.4.48.v20220622 -jetty-servlet-9.4.48.v20220622 -jetty-servlets-9.4.48.v20220622 -jetty-util-9.4.48.v20220622 -jetty-util-ajax-9.4.48.v20220622 -jersey-common-2.34 -jersey-server-2.34 -jose4j-0.7.9 -lz4-java-1.8.0 -maven-artifact-3.8.4 -metrics-core-4.1.12.1 -metrics-core-2.2.0 -netty-buffer-4.1.78.Final -netty-codec-4.1.78.Final -netty-common-4.1.78.Final -netty-handler-4.1.78.Final -netty-resolver-4.1.78.Final -netty-transport-4.1.78.Final -netty-transport-classes-epoll-4.1.78.Final -netty-transport-native-epoll-4.1.78.Final -netty-transport-native-unix-common-4.1.78.Final -plexus-utils-3.3.0 -reload4j-1.2.19 -rocksdbjni-6.29.4.1 -scala-collection-compat_2.13-2.6.0 -scala-library-2.13.8 -scala-logging_2.13-3.9.4 -scala-reflect-2.13.8 -scala-java8-compat_2.13-1.0.2 -snakeyaml-1.30 -snappy-java-1.1.8.4 -swagger-annotations-2.2.0 -swagger-core-2.2.0 -swagger-integration-2.2.0 -swagger-jaxrs2-2.2.0 -swagger-models-2.2.0 -zookeeper-3.6.3 -zookeeper-jute-3.6.3 - -=============================================================================== -This product bundles various third-party components under other open source -licenses. This section summarizes those components and their licenses. -See licenses/ for text of these licenses. 
- ---------------------------------------- -Eclipse Distribution License - v 1.0 -see: licenses/eclipse-distribution-license-1.0 - -jakarta.activation-api-1.2.2 -jakarta.xml.bind-api-2.3.3 - ---------------------------------------- -Eclipse Public License - v 2.0 -see: licenses/eclipse-public-license-2.0 - -jakarta.annotation-api-1.3.5 -jakarta.ws.rs-api-2.1.6 -javax.ws.rs-api-2.1.1 -hk2-api-2.6.1 -hk2-locator-2.6.1 -hk2-utils-2.6.1 -osgi-resource-locator-1.0.3 -aopalliance-repackaged-2.6.1 -jakarta.inject-2.6.1 -jersey-container-servlet-2.34 -jersey-container-servlet-core-2.34 -jersey-client-2.34 -jersey-hk2-2.34 -jersey-media-jaxb-2.31 - ---------------------------------------- -CDDL 1.1 + GPLv2 with classpath exception -see: licenses/CDDL+GPL-1.1 - -javax.servlet-api-3.1.0 -jaxb-api-2.3.0 -activation-1.1.1 - ---------------------------------------- -MIT License - -argparse4j-0.7.0, see: licenses/argparse-MIT -jopt-simple-5.0.4, see: licenses/jopt-simple-MIT -slf4j-api-1.7.36, see: licenses/slf4j-MIT -slf4j-reload4j-1.7.36, see: licenses/slf4j-MIT -classgraph-4.8.138, see: license/classgraph-MIT - ---------------------------------------- -BSD 2-Clause - -zstd-jni-1.5.2-1 see: licenses/zstd-jni-BSD-2-clause - ---------------------------------------- -BSD 3-Clause - -jline-3.21.0, see: licenses/jline-BSD-3-clause -paranamer-2.8, see: licenses/paranamer-BSD-3-clause - ---------------------------------------- -Do What The F*ck You Want To Public License -see: licenses/DWTFYWTPL - -reflections-0.9.12 diff --git a/src/alto/service/kafka_ale/NOTICE b/src/alto/service/kafka_ale/NOTICE deleted file mode 100644 index a50c86d84b7d75f86a7cf89bc1409e6f0a33ced7..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/NOTICE +++ /dev/null @@ -1,856 +0,0 @@ -Apache Kafka -Copyright 2021 The Apache Software Foundation. - -This product includes software developed at -The Apache Software Foundation (https://www.apache.org/). - -This distribution has a binary dependency on jersey, which is available under the CDDL -License. The source code of jersey can be found at https://github.com/jersey/jersey/. - -This distribution has a binary test dependency on jqwik, which is available under -the Eclipse Public License 2.0. The source code can be found at -https://github.com/jlink/jqwik. - -The streams-scala (streams/streams-scala) module was donated by Lightbend and the original code was copyrighted by them: -Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com> -Copyright (C) 2017-2018 Alexis Seigneurin. - -This project contains the following code copied from Apache Hadoop: -clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java -Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. - -This project contains the following code copied from Apache Hive: -streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java - -// ------------------------------------------------------------------ -// NOTICE file corresponding to the section 4d of The Apache License, -// Version 2.0, in this case for -// ------------------------------------------------------------------ - -# Notices for Eclipse GlassFish - -This content is produced and maintained by the Eclipse GlassFish project. - -* Project home: https://projects.eclipse.org/projects/ee4j.glassfish - -## Trademarks - -Eclipse GlassFish, and GlassFish are trademarks of the Eclipse Foundation. 
- -## Copyright - -All content is the property of the respective authors or their employers. For -more information regarding authorship of content, please consult the listed -source code repository logs. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms -of the Eclipse Public License v. 2.0 which is available at -http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made -available under the following Secondary Licenses when the conditions for such -availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU -General Public License, version 2 with the GNU Classpath Exception which is -available at https://www.gnu.org/software/classpath/license.html. - -SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - -## Source Code - -The project maintains the following source code repositories: - -* https://github.com/eclipse-ee4j/glassfish-ha-api -* https://github.com/eclipse-ee4j/glassfish-logging-annotation-processor -* https://github.com/eclipse-ee4j/glassfish-shoal -* https://github.com/eclipse-ee4j/glassfish-cdi-porting-tck -* https://github.com/eclipse-ee4j/glassfish-jsftemplating -* https://github.com/eclipse-ee4j/glassfish-hk2-extra -* https://github.com/eclipse-ee4j/glassfish-hk2 -* https://github.com/eclipse-ee4j/glassfish-fighterfish - -## Third-party Content - -This project leverages the following third party content. - -None - -## Cryptography - -Content may contain encryption software. The country in which you are currently -may have restrictions on the import, possession, and use, and/or re-export to -another country, of encryption software. BEFORE using any encryption software, -please check the country's laws, regulations and policies concerning the import, -possession, or use, and re-export of encryption software, to see if this is -permitted. - - -Apache Yetus - Audience Annotations -Copyright 2015-2017 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -Apache Commons CLI -Copyright 2001-2017 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -Apache Commons Lang -Copyright 2001-2018 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. - - -# Notices for Eclipse Project for JAF - -This content is produced and maintained by the Eclipse Project for JAF project. 
- -* Project home: https://projects.eclipse.org/projects/ee4j.jaf - -## Copyright - -All content is the property of the respective authors or their employers. For -more information regarding authorship of content, please consult the listed -source code repository logs. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms -of the Eclipse Distribution License v. 1.0, -which is available at http://www.eclipse.org/org/documents/edl-v10.php. - -SPDX-License-Identifier: BSD-3-Clause - -## Source Code - -The project maintains the following source code repositories: - -* https://github.com/eclipse-ee4j/jaf - -## Third-party Content - -This project leverages the following third party content. - -JUnit (4.12) - -* License: Eclipse Public License - - -# Notices for Jakarta Annotations - -This content is produced and maintained by the Jakarta Annotations project. - - * Project home: https://projects.eclipse.org/projects/ee4j.ca - -## Trademarks - -Jakarta Annotations is a trademark of the Eclipse Foundation. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms -of the Eclipse Public License v. 2.0 which is available at -http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made -available under the following Secondary Licenses when the conditions for such -availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU -General Public License, version 2 with the GNU Classpath Exception which is -available at https://www.gnu.org/software/classpath/license.html. - -SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - -## Source Code - -The project maintains the following source code repositories: - - * https://github.com/eclipse-ee4j/common-annotations-api - -## Third-party Content - -## Cryptography - -Content may contain encryption software. The country in which you are currently -may have restrictions on the import, possession, and use, and/or re-export to -another country, of encryption software. BEFORE using any encryption software, -please check the country's laws, regulations and policies concerning the import, -possession, or use, and re-export of encryption software, to see if this is -permitted. - - -# Notices for the Jakarta RESTful Web Services Project - -This content is produced and maintained by the **Jakarta RESTful Web Services** -project. - -* Project home: https://projects.eclipse.org/projects/ee4j.jaxrs - -## Trademarks - -**Jakarta RESTful Web Services** is a trademark of the Eclipse Foundation. - -## Copyright - -All content is the property of the respective authors or their employers. For -more information regarding authorship of content, please consult the listed -source code repository logs. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms -of the Eclipse Public License v. 2.0 which is available at -http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made -available under the following Secondary Licenses when the conditions for such -availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU -General Public License, version 2 with the GNU Classpath Exception which is -available at https://www.gnu.org/software/classpath/license.html. 
- -SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - -## Source Code - -The project maintains the following source code repositories: - -* https://github.com/eclipse-ee4j/jaxrs-api - -## Third-party Content - -This project leverages the following third party content. - -javaee-api (7.0) - -* License: Apache-2.0 AND W3C - -JUnit (4.11) - -* License: Common Public License 1.0 - -Mockito (2.16.0) - -* Project: http://site.mockito.org -* Source: https://github.com/mockito/mockito/releases/tag/v2.16.0 - -## Cryptography - -Content may contain encryption software. The country in which you are currently -may have restrictions on the import, possession, and use, and/or re-export to -another country, of encryption software. BEFORE using any encryption software, -please check the country's laws, regulations and policies concerning the import, -possession, or use, and re-export of encryption software, to see if this is -permitted. - - -# Notices for Eclipse Project for JAXB - -This content is produced and maintained by the Eclipse Project for JAXB project. - -* Project home: https://projects.eclipse.org/projects/ee4j.jaxb - -## Trademarks - -Eclipse Project for JAXB is a trademark of the Eclipse Foundation. - -## Copyright - -All content is the property of the respective authors or their employers. For -more information regarding authorship of content, please consult the listed -source code repository logs. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms -of the Eclipse Distribution License v. 1.0 which is available -at http://www.eclipse.org/org/documents/edl-v10.php. - -SPDX-License-Identifier: BSD-3-Clause - -## Source Code - -The project maintains the following source code repositories: - -* https://github.com/eclipse-ee4j/jaxb-api - -## Third-party Content - -This project leverages the following third party content. - -None - -## Cryptography - -Content may contain encryption software. The country in which you are currently -may have restrictions on the import, possession, and use, and/or re-export to -another country, of encryption software. BEFORE using any encryption software, -please check the country's laws, regulations and policies concerning the import, -possession, or use, and re-export of encryption software, to see if this is -permitted. - - -# Notice for Jersey -This content is produced and maintained by the Eclipse Jersey project. - -* Project home: https://projects.eclipse.org/projects/ee4j.jersey - -## Trademarks -Eclipse Jersey is a trademark of the Eclipse Foundation. - -## Copyright - -All content is the property of the respective authors or their employers. For -more information regarding authorship of content, please consult the listed -source code repository logs. - -## Declared Project Licenses - -This program and the accompanying materials are made available under the terms -of the Eclipse Public License v. 2.0 which is available at -http://www.eclipse.org/legal/epl-2.0. This Source Code may also be made -available under the following Secondary Licenses when the conditions for such -availability set forth in the Eclipse Public License v. 2.0 are satisfied: GNU -General Public License, version 2 with the GNU Classpath Exception which is -available at https://www.gnu.org/software/classpath/license.html. 
- -SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - -## Source Code -The project maintains the following source code repositories: - -* https://github.com/eclipse-ee4j/jersey - -## Third-party Content - -Angular JS, v1.6.6 -* License MIT (http://www.opensource.org/licenses/mit-license.php) -* Project: http://angularjs.org -* Coyright: (c) 2010-2017 Google, Inc. - -aopalliance Version 1 -* License: all the source code provided by AOP Alliance is Public Domain. -* Project: http://aopalliance.sourceforge.net -* Copyright: Material in the public domain is not protected by copyright - -Bean Validation API 2.0.2 -* License: Apache License, 2.0 -* Project: http://beanvalidation.org/1.1/ -* Copyright: 2009, Red Hat, Inc. and/or its affiliates, and individual contributors -* by the @authors tag. - -Hibernate Validator CDI, 6.1.2.Final -* License: Apache License, 2.0 -* Project: https://beanvalidation.org/ -* Repackaged in org.glassfish.jersey.server.validation.internal.hibernate - -Bootstrap v3.3.7 -* License: MIT license (https://github.com/twbs/bootstrap/blob/master/LICENSE) -* Project: http://getbootstrap.com -* Copyright: 2011-2016 Twitter, Inc - -Google Guava Version 18.0 -* License: Apache License, 2.0 -* Copyright (C) 2009 The Guava Authors - -javax.inject Version: 1 -* License: Apache License, 2.0 -* Copyright (C) 2009 The JSR-330 Expert Group - -Javassist Version 3.25.0-GA -* License: Apache License, 2.0 -* Project: http://www.javassist.org/ -* Copyright (C) 1999- Shigeru Chiba. All Rights Reserved. - -Jackson JAX-RS Providers Version 2.10.1 -* License: Apache License, 2.0 -* Project: https://github.com/FasterXML/jackson-jaxrs-providers -* Copyright: (c) 2009-2011 FasterXML, LLC. All rights reserved unless otherwise indicated. - -jQuery v1.12.4 -* License: jquery.org/license -* Project: jquery.org -* Copyright: (c) jQuery Foundation - -jQuery Barcode plugin 0.3 -* License: MIT & GPL (http://www.opensource.org/licenses/mit-license.php & http://www.gnu.org/licenses/gpl.html) -* Project: http://www.pasella.it/projects/jQuery/barcode -* Copyright: (c) 2009 Antonello Pasella antonello.pasella@gmail.com - -JSR-166 Extension - JEP 266 -* License: CC0 -* No copyright -* Written by Doug Lea with assistance from members of JCP JSR-166 Expert Group and released to the public domain, as explained at http://creativecommons.org/publicdomain/zero/1.0/ - -KineticJS, v4.7.1 -* License: MIT license (http://www.opensource.org/licenses/mit-license.php) -* Project: http://www.kineticjs.com, https://github.com/ericdrowell/KineticJS -* Copyright: Eric Rowell - -org.objectweb.asm Version 8.0 -* License: Modified BSD (http://asm.objectweb.org/license.html) -* Copyright (c) 2000-2011 INRIA, France Telecom. All rights reserved. - -org.osgi.core version 6.0.0 -* License: Apache License, 2.0 -* Copyright (c) OSGi Alliance (2005, 2008). All Rights Reserved. - -org.glassfish.jersey.server.internal.monitoring.core -* License: Apache License, 2.0 -* Copyright (c) 2015-2018 Oracle and/or its affiliates. All rights reserved. -* Copyright 2010-2013 Coda Hale and Yammer, Inc. - -W3.org documents -* License: W3C License -* Copyright: Copyright (c) 1994-2001 World Wide Web Consortium, (Massachusetts Institute of Technology, Institut National de Recherche en Informatique et en Automatique, Keio University). All Rights Reserved. http://www.w3.org/Consortium/Legal/ - - -============================================================== - Jetty Web Container - Copyright 1995-2018 Mort Bay Consulting Pty Ltd. 
-============================================================== - -The Jetty Web Container is Copyright Mort Bay Consulting Pty Ltd -unless otherwise noted. - -Jetty is dual licensed under both - - * The Apache 2.0 License - http://www.apache.org/licenses/LICENSE-2.0.html - - and - - * The Eclipse Public 1.0 License - http://www.eclipse.org/legal/epl-v10.html - -Jetty may be distributed under either license. - ------- -Eclipse - -The following artifacts are EPL. - * org.eclipse.jetty.orbit:org.eclipse.jdt.core - -The following artifacts are EPL and ASL2. - * org.eclipse.jetty.orbit:javax.security.auth.message - - -The following artifacts are EPL and CDDL 1.0. - * org.eclipse.jetty.orbit:javax.mail.glassfish - - ------- -Oracle - -The following artifacts are CDDL + GPLv2 with classpath exception. -https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html - - * javax.servlet:javax.servlet-api - * javax.annotation:javax.annotation-api - * javax.transaction:javax.transaction-api - * javax.websocket:javax.websocket-api - ------- -Oracle OpenJDK - -If ALPN is used to negotiate HTTP/2 connections, then the following -artifacts may be included in the distribution or downloaded when ALPN -module is selected. - - * java.sun.security.ssl - -These artifacts replace/modify OpenJDK classes. The modififications -are hosted at github and both modified and original are under GPL v2 with -classpath exceptions. -http://openjdk.java.net/legal/gplv2+ce.html - - ------- -OW2 - -The following artifacts are licensed by the OW2 Foundation according to the -terms of http://asm.ow2.org/license.html - -org.ow2.asm:asm-commons -org.ow2.asm:asm - - ------- -Apache - -The following artifacts are ASL2 licensed. - -org.apache.taglibs:taglibs-standard-spec -org.apache.taglibs:taglibs-standard-impl - - ------- -MortBay - -The following artifacts are ASL2 licensed. Based on selected classes from -following Apache Tomcat jars, all ASL2 licensed. - -org.mortbay.jasper:apache-jsp - org.apache.tomcat:tomcat-jasper - org.apache.tomcat:tomcat-juli - org.apache.tomcat:tomcat-jsp-api - org.apache.tomcat:tomcat-el-api - org.apache.tomcat:tomcat-jasper-el - org.apache.tomcat:tomcat-api - org.apache.tomcat:tomcat-util-scan - org.apache.tomcat:tomcat-util - -org.mortbay.jasper:apache-el - org.apache.tomcat:tomcat-jasper-el - org.apache.tomcat:tomcat-el-api - - ------- -Mortbay - -The following artifacts are CDDL + GPLv2 with classpath exception. - -https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html - -org.eclipse.jetty.toolchain:jetty-schemas - ------- -Assorted - -The UnixCrypt.java code implements the one way cryptography used by -Unix systems for simple password protection. Copyright 1996 Aki Yoshida, -modified April 2001 by Iris Van den Broeke, Daniel Deville. -Permission to use, copy, modify and distribute UnixCrypt -for non-commercial or commercial purposes and without fee is -granted provided that the copyright notice appears in all copies. - - -Apache log4j -Copyright 2007 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -Maven Artifact -Copyright 2001-2019 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -This product includes software developed by the Indiana University - Extreme! Lab (http://www.extreme.indiana.edu/). - -This product includes software developed by -The Apache Software Foundation (http://www.apache.org/). 
- -This product includes software developed by -ThoughtWorks (http://www.thoughtworks.com). - -This product includes software developed by -javolution (http://javolution.org/). - -This product includes software developed by -Rome (https://rome.dev.java.net/). - - -Scala -Copyright (c) 2002-2020 EPFL -Copyright (c) 2011-2020 Lightbend, Inc. - -Scala includes software developed at -LAMP/EPFL (https://lamp.epfl.ch/) and -Lightbend, Inc. (https://www.lightbend.com/). - -Licensed under the Apache License, Version 2.0 (the "License"). -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This software includes projects with other licenses -- see `doc/LICENSE.md`. - - -Apache ZooKeeper - Server -Copyright 2008-2021 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -Apache ZooKeeper - Jute -Copyright 2008-2021 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -The Netty Project - ================= - -Please visit the Netty web site for more information: - - * https://netty.io/ - -Copyright 2014 The Netty Project - -The Netty Project licenses this file to you under the Apache License, -version 2.0 (the "License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at: - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. - -Also, please refer to each LICENSE.<component>.txt file, which is located in -the 'license' directory of the distribution file, for the license terms of the -components that this product depends on. - -------------------------------------------------------------------------------- -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * https://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * https://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jpountz/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * https://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * https://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * http://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. 
- - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - -This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. -This private header is also used by Apple's open source - mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). - - * LICENSE: - * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) - * HOMEPAGE: - * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h \ No newline at end of file diff --git a/src/alto/service/kafka_ale/__int__.py b/src/alto/service/kafka_ale/__int__.py deleted file mode 100644 index 2c0e900c840c3a053a5cbb1df36451b5bc9677ea..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/__int__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .kafka_api.py import AltoProducer -from .kafka_api.py import AltoConsumer diff --git a/src/alto/service/kafka_ale/bin/connect-distributed.sh b/src/alto/service/kafka_ale/bin/connect-distributed.sh deleted file mode 100644 index b8088ad92345137d6861553619ba9843dce2fb5a..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/connect-distributed.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 [-daemon] connect-distributed.properties" - exit 1 -fi - -base_dir=$(dirname $0) - -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" -fi - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G" -fi - -EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'} - -COMMAND=$1 -case $COMMAND in - -daemon) - EXTRA_ARGS="-daemon "$EXTRA_ARGS - shift - ;; - *) - ;; -esac - -exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@" diff --git a/src/alto/service/kafka_ale/bin/connect-mirror-maker.sh b/src/alto/service/kafka_ale/bin/connect-mirror-maker.sh deleted file mode 100644 index 8e2b2e162daac7e026c499da03f10ab1b08937a9..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/connect-mirror-maker.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 [-daemon] mm2.properties" - exit 1 -fi - -base_dir=$(dirname $0) - -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" -fi - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G" -fi - -EXTRA_ARGS=${EXTRA_ARGS-'-name mirrorMaker'} - -COMMAND=$1 -case $COMMAND in - -daemon) - EXTRA_ARGS="-daemon "$EXTRA_ARGS - shift - ;; - *) - ;; -esac - -exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.mirror.MirrorMaker "$@" diff --git a/src/alto/service/kafka_ale/bin/connect-standalone.sh b/src/alto/service/kafka_ale/bin/connect-standalone.sh deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/src/alto/service/kafka_ale/bin/kafka-acls.sh b/src/alto/service/kafka_ale/bin/kafka-acls.sh deleted file mode 100644 index 8fa65542e10bfa32e3a9fea7e0ec0d3161d92e92..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-acls.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-broker-api-versions.sh b/src/alto/service/kafka_ale/bin/kafka-broker-api-versions.sh deleted file mode 100644 index 4f560a0a60cd599b6a66bc573167dee26d36c67d..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-broker-api-versions.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-cluster.sh b/src/alto/service/kafka_ale/bin/kafka-cluster.sh deleted file mode 100644 index 574007e9cd4b8ad540a3ab3918d52df195fd90a1..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-cluster.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.ClusterTool "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-configs.sh b/src/alto/service/kafka_ale/bin/kafka-configs.sh deleted file mode 100644 index 2f9eb8c239f596ec8d171f40ddec38a13177a2e8..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-configs.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-console-consumer.sh b/src/alto/service/kafka_ale/bin/kafka-console-consumer.sh deleted file mode 100644 index dbaac2b83b1890eeeeb1a19c5831254fe408d788..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-console-consumer.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-console-producer.sh b/src/alto/service/kafka_ale/bin/kafka-console-producer.sh deleted file mode 100644 index e5187b8b5335fa2a688f9c18665266731ced0506..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-console-producer.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi -exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-consumer-groups.sh b/src/alto/service/kafka_ale/bin/kafka-consumer-groups.sh deleted file mode 100644 index feb063de75693b73d0f874a03d14ce87294df7c5..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-consumer-groups.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-consumer-perf-test.sh b/src/alto/service/kafka_ale/bin/kafka-consumer-perf-test.sh deleted file mode 100644 index 77cda721d6c5238d354f3f133bab93c5352edd6a..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-consumer-perf-test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi -exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-delegation-tokens.sh b/src/alto/service/kafka_ale/bin/kafka-delegation-tokens.sh deleted file mode 100644 index 49cb276ab318289f690c6855a326804d2cf720ab..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-delegation-tokens.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.DelegationTokenCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-delete-records.sh b/src/alto/service/kafka_ale/bin/kafka-delete-records.sh deleted file mode 100644 index 8726f919992329db81c4e1b8da6765e34184f1e0..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-delete-records.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -exec $(dirname $0)/kafka-run-class.sh kafka.admin.DeleteRecordsCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-dump-log.sh b/src/alto/service/kafka_ale/bin/kafka-dump-log.sh deleted file mode 100644 index a97ea7d3d9f8cf57b9ea41012ab8c9b92d389d8d..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-dump-log.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.DumpLogSegments "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-features.sh b/src/alto/service/kafka_ale/bin/kafka-features.sh deleted file mode 100644 index 9dd9f16fd1b05568db04418be9c0c99adf8f9324..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-features.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.FeatureCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-get-offsets.sh b/src/alto/service/kafka_ale/bin/kafka-get-offsets.sh deleted file mode 100644 index 993a202683309a844321d348dbdb09295d49e5bc..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-get-offsets.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.GetOffsetShell "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-leader-election.sh b/src/alto/service/kafka_ale/bin/kafka-leader-election.sh deleted file mode 100644 index 88baef398de95cd1cf737a6d8589d9fdd5a5ac3f..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-leader-election.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.LeaderElectionCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-log-dirs.sh b/src/alto/service/kafka_ale/bin/kafka-log-dirs.sh deleted file mode 100644 index dc16edcc7c5ebefff42ea3089472560427f25727..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-log-dirs.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.LogDirsCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-metadata-quorum.sh b/src/alto/service/kafka_ale/bin/kafka-metadata-quorum.sh deleted file mode 100644 index 24bedbded1e7dfeaa435f9aab62d56ecb15faa7b..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-metadata-quorum.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.MetadataQuorumCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-metadata-shell.sh b/src/alto/service/kafka_ale/bin/kafka-metadata-shell.sh deleted file mode 100644 index 289f0c1b51f27cd6e93126bc51c81ab4357440db..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-metadata-shell.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.shell.MetadataShell "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-mirror-maker.sh b/src/alto/service/kafka_ale/bin/kafka-mirror-maker.sh deleted file mode 100644 index 981f2711af960be2dc2dc3ce2374c6db3303aa1a..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-mirror-maker.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-producer-perf-test.sh b/src/alto/service/kafka_ale/bin/kafka-producer-perf-test.sh deleted file mode 100644 index 73a62888a13d547fe21fc89d41035c775f26026f..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-producer-perf-test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi -exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-reassign-partitions.sh b/src/alto/service/kafka_ale/bin/kafka-reassign-partitions.sh deleted file mode 100644 index 4c7f1bc35e0a98b2ba978ce07ab9465c841d8b1b..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-reassign-partitions.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-replica-verification.sh b/src/alto/service/kafka_ale/bin/kafka-replica-verification.sh deleted file mode 100644 index 4960836c0d034f338bec16fdb5e7dccc4fa18821..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-replica-verification.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplicaVerificationTool "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-run-class.sh b/src/alto/service/kafka_ale/bin/kafka-run-class.sh deleted file mode 100644 index 490f930b8cc4df8fd3a94c535f0d7198dcd20e69..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-run-class.sh +++ /dev/null @@ -1,343 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]" - exit 1 -fi - -# CYGWIN == 1 if Cygwin is detected, else 0. -if [[ $(uname -a) =~ "CYGWIN" ]]; then - CYGWIN=1 -else - CYGWIN=0 -fi - -if [ -z "$INCLUDE_TEST_JARS" ]; then - INCLUDE_TEST_JARS=false -fi - -# Exclude jars not necessary for running commands. -regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc|connect-file.*\.jar)$" -should_include_file() { - if [ "$INCLUDE_TEST_JARS" = true ]; then - return 0 - fi - file=$1 - if [ -z "$(echo "$file" | egrep "$regex")" ] ; then - return 0 - else - return 1 - fi -} - -base_dir=$(dirname $0)/.. 
- -if [ -z "$SCALA_VERSION" ]; then - SCALA_VERSION=2.13.6 - if [[ -f "$base_dir/gradle.properties" ]]; then - SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2` - fi -fi - -if [ -z "$SCALA_BINARY_VERSION" ]; then - SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.') -fi - -# run ./gradlew copyDependantLibs to get all dependant jars in a local dir -shopt -s nullglob -if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then - for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*; - do - CLASSPATH="$CLASSPATH:$dir/*" - done -fi - -for file in "$base_dir"/examples/build/libs/kafka-examples*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then - clients_lib_dir=$(dirname $0)/../clients/build/libs - streams_lib_dir=$(dirname $0)/../streams/build/libs - streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION} -else - clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs - streams_lib_dir=$clients_lib_dir - streams_dependant_clients_lib_dir=$streams_lib_dir -fi - - -for file in "$clients_lib_dir"/kafka-clients*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -for file in "$streams_lib_dir"/kafka-streams*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then - for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar; - do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi - done -else - VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'` - SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number - for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar; - do - if should_include_file "$file"; then - CLASSPATH="$file":"$CLASSPATH" - fi - done - if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH" - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH" - fi - if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH" - CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH" - fi -fi - -for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar; -do - CLASSPATH="$CLASSPATH":"$file" -done - -for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar; -do - CLASSPATH="$CLASSPATH":"$file" -done - -for file in "$base_dir"/shell/build/libs/kafka-shell*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -for dir in "$base_dir"/shell/build/dependant-libs-${SCALA_VERSION}*; -do - CLASSPATH="$CLASSPATH:$dir/*" -done - -for file in "$base_dir"/tools/build/libs/kafka-tools*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*; -do - CLASSPATH="$CLASSPATH:$dir/*" -done - -for file in "$base_dir"/trogdor/build/libs/trogdor-*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -for dir in 
"$base_dir"/trogdor/build/dependant-libs-${SCALA_VERSION}*; -do - CLASSPATH="$CLASSPATH:$dir/*" -done - -for cc_pkg in "api" "transforms" "runtime" "mirror" "mirror-client" "json" "tools" "basic-auth-extension" -do - for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar; - do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi - done - if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then - CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*" - fi -done - -# classpath addition for release -for file in "$base_dir"/libs/*; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done - -for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar; -do - if should_include_file "$file"; then - CLASSPATH="$CLASSPATH":"$file" - fi -done -shopt -u nullglob - -if [ -z "$CLASSPATH" ] ; then - echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'" - exit 1 -fi - -# JMX settings -if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " -fi - -# JMX port to use -if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT " -fi - -# Log directory to use -if [ "x$LOG_DIR" = "x" ]; then - LOG_DIR="$base_dir/logs" -fi - -# Log4j settings -if [ -z "$KAFKA_LOG4J_OPTS" ]; then - # Log to console. This is a tool. - LOG4J_DIR="$base_dir/config/tools-log4j.properties" - # If Cygwin is detected, LOG4J_DIR is converted to Windows format. - (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}") - KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}" -else - # create logs directory - if [ ! -d "$LOG_DIR" ]; then - mkdir -p "$LOG_DIR" - fi -fi - -# If Cygwin is detected, LOG_DIR is converted to Windows format. 
-(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}") -KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS" - -# Generic jvm settings you want to add -if [ -z "$KAFKA_OPTS" ]; then - KAFKA_OPTS="" -fi - -# Set Debug options if enabled -if [ "x$KAFKA_DEBUG" != "x" ]; then - - # Use default ports - DEFAULT_JAVA_DEBUG_PORT="5005" - - if [ -z "$JAVA_DEBUG_PORT" ]; then - JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT" - fi - - # Use the defaults if JAVA_DEBUG_OPTS was not set - DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT" - if [ -z "$JAVA_DEBUG_OPTS" ]; then - JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS" - fi - - echo "Enabling Java debug options: $JAVA_DEBUG_OPTS" - KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS" -fi - -# Which java to use -if [ -z "$JAVA_HOME" ]; then - JAVA="java" -else - JAVA="$JAVA_HOME/bin/java" -fi - -# Memory options -if [ -z "$KAFKA_HEAP_OPTS" ]; then - KAFKA_HEAP_OPTS="-Xmx256M" -fi - -# JVM performance options -# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported -if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then - KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true" -fi - -while [ $# -gt 0 ]; do - COMMAND=$1 - case $COMMAND in - -name) - DAEMON_NAME=$2 - CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out - shift 2 - ;; - -loggc) - if [ -z "$KAFKA_GC_LOG_OPTS" ]; then - GC_LOG_ENABLED="true" - fi - shift - ;; - -daemon) - DAEMON_MODE="true" - shift - ;; - *) - break - ;; - esac -done - -# GC options -GC_FILE_SUFFIX='-gc.log' -GC_LOG_FILE_NAME='' -if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then - GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX - - # The first segment of the version number, which is '1' for releases before Java 9 - # it then becomes '9', '10', ... - # Some examples of the first line of `java --version`: - # 8 -> java version "1.8.0_152" - # 9.0.4 -> java version "9.0.4" - # 10 -> java version "10" 2018-03-20 - # 10.0.1 -> java version "10.0.1" 2018-04-17 - # We need to match to the end of the line to prevent sed from printing the characters that do not match - JAVA_MAJOR_VERSION=$("$JAVA" -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p') - if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then - KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=100M" - else - KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M" - fi -fi - -# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank) -# Syntax used on the right side is native Bash string manipulation; for more details see -# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal" -CLASSPATH=${CLASSPATH#:} - -# If Cygwin is detected, classpath is converted to Windows format. 
-(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}") - -# Launch mode -if [ "x$DAEMON_MODE" = "xtrue" ]; then - nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null & -else - exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" -fi diff --git a/src/alto/service/kafka_ale/bin/kafka-server-start.sh b/src/alto/service/kafka_ale/bin/kafka-server-start.sh deleted file mode 100644 index 5a53126172de9d024891942d9f4748b7fcd4592b..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-server-start.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 [-daemon] server.properties [--override property=value]*" - exit 1 -fi -base_dir=$(dirname $0) - -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" -fi - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" -fi - -EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'} - -COMMAND=$1 -case $COMMAND in - -daemon) - EXTRA_ARGS="-daemon "$EXTRA_ARGS - shift - ;; - *) - ;; -esac - -exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-server-stop.sh b/src/alto/service/kafka_ale/bin/kafka-server-stop.sh deleted file mode 100644 index 437189f4a5b4ca92579f5803762e392eb31bb6f7..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-server-stop.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-SIGNAL=${SIGNAL:-TERM} - -OSNAME=$(uname -s) -if [[ "$OSNAME" == "OS/390" ]]; then - if [ -z $JOBNAME ]; then - JOBNAME="KAFKSTRT" - fi - PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}') -elif [[ "$OSNAME" == "OS400" ]]; then - PIDS=$(ps -Af | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $2}') -else - PIDS=$(ps ax | grep ' kafka\.Kafka ' | grep java | grep -v grep | awk '{print $1}') -fi - -if [ -z "$PIDS" ]; then - echo "No kafka server to stop" - exit 1 -else - kill -s $SIGNAL $PIDS -fi diff --git a/src/alto/service/kafka_ale/bin/kafka-storage.sh b/src/alto/service/kafka_ale/bin/kafka-storage.sh deleted file mode 100644 index eef93423877f02afe15e9a517f76d700baba522d..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-storage.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.StorageTool "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-streams-application-reset.sh b/src/alto/service/kafka_ale/bin/kafka-streams-application-reset.sh deleted file mode 100644 index 336373254004ea9b2027082b1efcedbf9ab8ddd4..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-streams-application-reset.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi - -exec $(dirname $0)/kafka-run-class.sh kafka.tools.StreamsResetter "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-topics.sh b/src/alto/service/kafka_ale/bin/kafka-topics.sh deleted file mode 100644 index ad6a2d4d2a0160c0fba70820a70a6f6d208b5ca7..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-topics.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-transactions.sh b/src/alto/service/kafka_ale/bin/kafka-transactions.sh deleted file mode 100644 index 6fb523385557fd9e3aca85ab61144ab90f681a93..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-transactions.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.TransactionsCommand "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-verifiable-consumer.sh b/src/alto/service/kafka_ale/bin/kafka-verifiable-consumer.sh deleted file mode 100644 index 852847df03ba5cfe420d9337d5c76f810383092f..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-verifiable-consumer.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi -exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@" diff --git a/src/alto/service/kafka_ale/bin/kafka-verifiable-producer.sh b/src/alto/service/kafka_ale/bin/kafka-verifiable-producer.sh deleted file mode 100644 index b59bae7d2beae882715ce5b8cf83fe8ef3e3acf0..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/kafka-verifiable-producer.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M" -fi -exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@" diff --git a/src/alto/service/kafka_ale/bin/trogdor.sh b/src/alto/service/kafka_ale/bin/trogdor.sh deleted file mode 100644 index 3324c4ea8ec3e7ae8d687d67250d8bfd71ec98cf..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/trogdor.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -usage() { - cat <<EOF -The Trogdor fault injector. - -Usage: - $0 [action] [options] - -Actions: - agent: Run the trogdor agent. - coordinator: Run the trogdor coordinator. - client: Run the client which communicates with the trogdor coordinator. - agent-client: Run the client which communicates with the trogdor agent. - help: This help message. -EOF -} - -if [[ $# -lt 1 ]]; then - usage - exit 0 -fi -action="${1}" -shift -CLASS="" -case ${action} in - agent) CLASS="org.apache.kafka.trogdor.agent.Agent";; - coordinator) CLASS="org.apache.kafka.trogdor.coordinator.Coordinator";; - client) CLASS="org.apache.kafka.trogdor.coordinator.CoordinatorClient";; - agent-client) CLASS="org.apache.kafka.trogdor.agent.AgentClient";; - help) usage; exit 0;; - *) echo "Unknown action '${action}'. 
Type '$0 help' for help."; exit 1;; -esac - -export INCLUDE_TEST_JARS=1 -exec $(dirname $0)/kafka-run-class.sh "${CLASS}" "$@" diff --git a/src/alto/service/kafka_ale/bin/windows/connect-distributed.bat b/src/alto/service/kafka_ale/bin/windows/connect-distributed.bat deleted file mode 100644 index 0535085bde50778aa9f768d625da8cc900f1da78..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/connect-distributed.bat +++ /dev/null @@ -1,34 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -IF [%1] EQU [] ( - echo USAGE: %0 connect-distributed.properties - EXIT /B 1 -) - -SetLocal -rem Using pushd popd to set BASE_DIR to the absolute path -pushd %~dp0..\.. -set BASE_DIR=%CD% -popd - -rem Log4j settings -IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties -) - -"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/connect-standalone.bat b/src/alto/service/kafka_ale/bin/windows/connect-standalone.bat deleted file mode 100644 index 12ebb21dc9a852ffaf6c2c26b056b7cd324b5e9e..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/connect-standalone.bat +++ /dev/null @@ -1,34 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -IF [%1] EQU [] ( - echo USAGE: %0 connect-standalone.properties - EXIT /B 1 -) - -SetLocal -rem Using pushd popd to set BASE_DIR to the absolute path -pushd %~dp0..\.. 
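The usage text in the trogdor.sh script deleted above lists its four roles (agent, coordinator, client, agent-client). Going by the upstream Trogdor README, a local smoke test starts one agent and one coordinator; the config path and node name below are assumptions, so verify them against that README before relying on this:

    # start the Trogdor fault-injection daemons (paths and node name are placeholders)
    bin/trogdor.sh agent -c config/trogdor.conf -n node0 &
    bin/trogdor.sh coordinator -c config/trogdor.conf -n node0 &
    bin/trogdor.sh help    # prints the usage block quoted above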
-set BASE_DIR=%CD% -popd - -rem Log4j settings -IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/connect-log4j.properties -) - -"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-acls.bat b/src/alto/service/kafka_ale/bin/windows/kafka-acls.bat deleted file mode 100644 index 8f0be85c0455a6b803d59146b560892a216ab067..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-acls.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-broker-api-versions.bat b/src/alto/service/kafka_ale/bin/windows/kafka-broker-api-versions.bat deleted file mode 100644 index f7ec72da55f2bf5ef763b4c1809358611ce142cd..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-broker-api-versions.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -%~dp0kafka-run-class.bat kafka.admin.BrokerApiVersionsCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-configs.bat b/src/alto/service/kafka_ale/bin/windows/kafka-configs.bat deleted file mode 100644 index 3792a5d9b7e1766f792ae202f71140bed9b9e3c6..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-configs.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. 
You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.ConfigCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-console-consumer.bat b/src/alto/service/kafka_ale/bin/windows/kafka-console-consumer.bat deleted file mode 100644 index bbbd33656ad0ea7890505fc41f1f7b1103f057a2..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-console-consumer.bat +++ /dev/null @@ -1,20 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -SetLocal -set KAFKA_HEAP_OPTS=-Xmx512M -"%~dp0kafka-run-class.bat" kafka.tools.ConsoleConsumer %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-console-producer.bat b/src/alto/service/kafka_ale/bin/windows/kafka-console-producer.bat deleted file mode 100644 index e1834bc5a8520b728cdc4ac676dba801d99ffeb1..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-console-producer.bat +++ /dev/null @@ -1,20 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. 
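The console consumer and producer wrappers in this stretch do nothing beyond pinning KAFKA_HEAP_OPTS=-Xmx512M and delegating to kafka-run-class. A round trip with the equivalent upstream .sh scripts, reusing the assumed localhost:9092 broker and demo topic:

    # write two records, then read them back
    printf 'hello\nworld\n' | bin/kafka-console-producer.sh \
        --bootstrap-server localhost:9092 --topic demo
    bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
        --topic demo --from-beginning --max-messages 2
    # the .sh wrappers only set the heap when unset, so it can be raised per run
    KAFKA_HEAP_OPTS=-Xmx1G bin/kafka-console-consumer.sh \
        --bootstrap-server localhost:9092 --topic demo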
- -SetLocal -set KAFKA_HEAP_OPTS=-Xmx512M -"%~dp0kafka-run-class.bat" kafka.tools.ConsoleProducer %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-consumer-groups.bat b/src/alto/service/kafka_ale/bin/windows/kafka-consumer-groups.bat deleted file mode 100644 index e027b9e6bfe5934e757a5ae9433d7fe91da2bf3e..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-consumer-groups.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.ConsumerGroupCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-consumer-perf-test.bat b/src/alto/service/kafka_ale/bin/windows/kafka-consumer-perf-test.bat deleted file mode 100644 index 606c784605aaf1b706ac98d1bfb6ea524e87ad89..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-consumer-perf-test.bat +++ /dev/null @@ -1,20 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -SetLocal -set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M -"%~dp0kafka-run-class.bat" kafka.tools.ConsumerPerformance %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-delegation-tokens.bat b/src/alto/service/kafka_ale/bin/windows/kafka-delegation-tokens.bat deleted file mode 100644 index 996537f8c020c4be8662a2aabcdc580083fd824c..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-delegation-tokens.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. 
You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.DelegationTokenCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-delete-records.bat b/src/alto/service/kafka_ale/bin/windows/kafka-delete-records.bat deleted file mode 100644 index d07e05f88a22b51af97a450f092635adc8ebfd93..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-delete-records.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.DeleteRecordsCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-dump-log.bat b/src/alto/service/kafka_ale/bin/windows/kafka-dump-log.bat deleted file mode 100644 index 3a1473dc61bc706682b4a603212ac2a3cd88d31d..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-dump-log.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.tools.DumpLogSegments %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-get-offsets.bat b/src/alto/service/kafka_ale/bin/windows/kafka-get-offsets.bat deleted file mode 100644 index 08b8e27d70fecb4a2b1424a019a2ae68b5780fee..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-get-offsets.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. 
See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.tools.GetOffsetShell %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-leader-election.bat b/src/alto/service/kafka_ale/bin/windows/kafka-leader-election.bat deleted file mode 100644 index 0432a99b6e413fd2ef44fb5788397784f4ec0dc2..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-leader-election.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.LeaderElectionCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-log-dirs.bat b/src/alto/service/kafka_ale/bin/windows/kafka-log-dirs.bat deleted file mode 100644 index b490d47feaed621fbb1df96c312c3d0791279f9c..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-log-dirs.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. 
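kafka-get-offsets, kafka-leader-election and kafka-log-dirs are likewise one-line delegations (GetOffsetShell, LeaderElectionCommand, LogDirsCommand). Sketches of the matching upstream .sh calls, with --time -1 meaning the latest offset and -2 the earliest:

    bin/kafka-get-offsets.sh --bootstrap-server localhost:9092 --topic demo --time -1
    bin/kafka-log-dirs.sh --bootstrap-server localhost:9092 --describe
    bin/kafka-leader-election.sh --bootstrap-server localhost:9092 \
        --election-type PREFERRED --all-topic-partitions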
- -"%~dp0kafka-run-class.bat" kafka.admin.LogDirsCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-metatada-quorum.bat b/src/alto/service/kafka_ale/bin/windows/kafka-metatada-quorum.bat deleted file mode 100644 index 4ea8e3109f962da13c96693123ef7b19c9a6abc2..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-metatada-quorum.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.MetadataQuorumCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-mirror-maker.bat b/src/alto/service/kafka_ale/bin/windows/kafka-mirror-maker.bat deleted file mode 100644 index a1fae45112f221b874e6147d5137e9c419e7238a..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-mirror-maker.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.tools.MirrorMaker %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-producer-perf-test.bat b/src/alto/service/kafka_ale/bin/windows/kafka-producer-perf-test.bat deleted file mode 100644 index 917d2117fbbe5d690b6bd5c38fcd1be70bdae9e0..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-producer-perf-test.bat +++ /dev/null @@ -1,20 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. 
You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -SetLocal -set KAFKA_HEAP_OPTS=-Xmx512M -"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ProducerPerformance %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-reassign-partitions.bat b/src/alto/service/kafka_ale/bin/windows/kafka-reassign-partitions.bat deleted file mode 100644 index 62b710d36e08090242f5e7402ca12db115f8bb70..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-reassign-partitions.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.ReassignPartitionsCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-replica-verification.bat b/src/alto/service/kafka_ale/bin/windows/kafka-replica-verification.bat deleted file mode 100644 index bf4805d7f64c003c905475ae99def021f87545a6..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-replica-verification.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. 
- -"%~dp0kafka-run-class.bat" kafka.tools.ReplicaVerificationTool %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-run-class.bat b/src/alto/service/kafka_ale/bin/windows/kafka-run-class.bat deleted file mode 100644 index 26ef84a4f5c9ac7a044da87ca2ad7ac68bc34d7c..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-run-class.bat +++ /dev/null @@ -1,191 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -setlocal enabledelayedexpansion - -IF [%1] EQU [] ( - echo USAGE: %0 classname [opts] - EXIT /B 1 -) - -rem Using pushd popd to set BASE_DIR to the absolute path -pushd %~dp0..\.. -set BASE_DIR=%CD% -popd - -IF ["%SCALA_VERSION%"] EQU [""] ( - set SCALA_VERSION=2.13.6 -) - -IF ["%SCALA_BINARY_VERSION%"] EQU [""] ( - for /f "tokens=1,2 delims=." %%a in ("%SCALA_VERSION%") do ( - set FIRST=%%a - set SECOND=%%b - if ["!SECOND!"] EQU [""] ( - set SCALA_BINARY_VERSION=!FIRST! - ) else ( - set SCALA_BINARY_VERSION=!FIRST!.!SECOND! - ) - ) -) - -rem Classpath addition for kafka-core dependencies -for %%i in ("%BASE_DIR%\core\build\dependant-libs-%SCALA_VERSION%\*.jar") do ( - call :concat "%%i" -) - -rem Classpath addition for kafka-examples -for %%i in ("%BASE_DIR%\examples\build\libs\kafka-examples*.jar") do ( - call :concat "%%i" -) - -rem Classpath addition for kafka-clients -for %%i in ("%BASE_DIR%\clients\build\libs\kafka-clients*.jar") do ( - call :concat "%%i" -) - -rem Classpath addition for kafka-streams -for %%i in ("%BASE_DIR%\streams\build\libs\kafka-streams*.jar") do ( - call :concat "%%i" -) - -rem Classpath addition for kafka-streams-examples -for %%i in ("%BASE_DIR%\streams\examples\build\libs\kafka-streams-examples*.jar") do ( - call :concat "%%i" -) - -for %%i in ("%BASE_DIR%\streams\build\dependant-libs-%SCALA_VERSION%\rocksdb*.jar") do ( - call :concat "%%i" -) - -rem Classpath addition for kafka tools -for %%i in ("%BASE_DIR%\tools\build\libs\kafka-tools*.jar") do ( - call :concat "%%i" -) - -for %%i in ("%BASE_DIR%\tools\build\dependant-libs-%SCALA_VERSION%\*.jar") do ( - call :concat "%%i" -) - -for %%p in (api runtime file json tools) do ( - for %%i in ("%BASE_DIR%\connect\%%p\build\libs\connect-%%p*.jar") do ( - call :concat "%%i" - ) - if exist "%BASE_DIR%\connect\%%p\build\dependant-libs\*" ( - call :concat "%BASE_DIR%\connect\%%p\build\dependant-libs\*" - ) -) - -rem Classpath addition for release -for %%i in ("%BASE_DIR%\libs\*") do ( - call :concat "%%i" -) - -rem Classpath addition for core -for %%i in ("%BASE_DIR%\core\build\libs\kafka_%SCALA_BINARY_VERSION%*.jar") do ( - call :concat "%%i" -) - -rem JMX settings -IF ["%KAFKA_JMX_OPTS%"] EQU [""] ( - set KAFKA_JMX_OPTS=-Dcom.sun.management.jmxremote 
-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -) - -rem JMX port to use -IF ["%JMX_PORT%"] NEQ [""] ( - set KAFKA_JMX_OPTS=%KAFKA_JMX_OPTS% -Dcom.sun.management.jmxremote.port=%JMX_PORT% -) - -rem Log directory to use -IF ["%LOG_DIR%"] EQU [""] ( - set LOG_DIR=%BASE_DIR%/logs -) - -rem Log4j settings -IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties -) ELSE ( - rem create logs directory - IF not exist "%LOG_DIR%" ( - mkdir "%LOG_DIR%" - ) -) - -set KAFKA_LOG4J_OPTS=-Dkafka.logs.dir="%LOG_DIR%" "%KAFKA_LOG4J_OPTS%" - -rem Generic jvm settings you want to add -IF ["%KAFKA_OPTS%"] EQU [""] ( - set KAFKA_OPTS= -) - -set DEFAULT_JAVA_DEBUG_PORT=5005 -set DEFAULT_DEBUG_SUSPEND_FLAG=n -rem Set Debug options if enabled -IF ["%KAFKA_DEBUG%"] NEQ [""] ( - - - IF ["%JAVA_DEBUG_PORT%"] EQU [""] ( - set JAVA_DEBUG_PORT=%DEFAULT_JAVA_DEBUG_PORT% - ) - - IF ["%DEBUG_SUSPEND_FLAG%"] EQU [""] ( - set DEBUG_SUSPEND_FLAG=%DEFAULT_DEBUG_SUSPEND_FLAG% - ) - set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=!DEBUG_SUSPEND_FLAG!,address=!JAVA_DEBUG_PORT! - - IF ["%JAVA_DEBUG_OPTS%"] EQU [""] ( - set JAVA_DEBUG_OPTS=!DEFAULT_JAVA_DEBUG_OPTS! - ) - - echo Enabling Java debug options: !JAVA_DEBUG_OPTS! - set KAFKA_OPTS=!JAVA_DEBUG_OPTS! !KAFKA_OPTS! -) - -rem Which java to use -IF ["%JAVA_HOME%"] EQU [""] ( - set JAVA=java -) ELSE ( - set JAVA="%JAVA_HOME%/bin/java" -) - -rem Memory options -IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( - set KAFKA_HEAP_OPTS=-Xmx256M -) - -rem JVM performance options -IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] ( - set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true -) - -IF not defined CLASSPATH ( - echo Classpath is empty. Please build the project first e.g. by running 'gradlew jarAll' - EXIT /B 2 -) - -set COMMAND=%JAVA% %KAFKA_HEAP_OPTS% %KAFKA_JVM_PERFORMANCE_OPTS% %KAFKA_JMX_OPTS% %KAFKA_LOG4J_OPTS% -cp "%CLASSPATH%" %KAFKA_OPTS% %* -rem echo. -rem echo %COMMAND% -rem echo. -%COMMAND% - -goto :eof -:concat -IF not defined CLASSPATH ( - set CLASSPATH="%~1" -) ELSE ( - set CLASSPATH=%CLASSPATH%;"%~1" -) diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-server-start.bat b/src/alto/service/kafka_ale/bin/windows/kafka-server-start.bat deleted file mode 100644 index 8624eda9ff08971d9c2466c09c903191f0c2ed99..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-server-start.bat +++ /dev/null @@ -1,38 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. 
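As the kafka-run-class.bat body above shows (mirroring kafka-run-class.sh), every tool honors a small set of environment variables: KAFKA_DEBUG attaches a JDWP agent on JAVA_DEBUG_PORT (default 5005, suspend controlled by DEBUG_SUSPEND_FLAG), JMX_PORT enables remote JMX, LOG_DIR feeds -Dkafka.logs.dir, and KAFKA_HEAP_OPTS / KAFKA_JVM_PERFORMANCE_OPTS replace the memory and GC defaults. For example:

    # remote-debug a tool run on the default port 5005
    KAFKA_DEBUG=y bin/kafka-topics.sh --bootstrap-server localhost:9092 --list
    # expose JMX and trim the heap for a one-off command
    JMX_PORT=9999 KAFKA_HEAP_OPTS=-Xmx256M bin/kafka-consumer-groups.sh \
        --bootstrap-server localhost:9092 --list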
- -IF [%1] EQU [] ( - echo USAGE: %0 server.properties - EXIT /B 1 -) - -SetLocal -IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties -) -IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( - rem detect OS architecture - wmic os get osarchitecture | find /i "32-bit" >nul 2>&1 - IF NOT ERRORLEVEL 1 ( - rem 32-bit OS - set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M - ) ELSE ( - rem 64-bit OS - set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G - ) -) -"%~dp0kafka-run-class.bat" kafka.Kafka %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-server-stop.bat b/src/alto/service/kafka_ale/bin/windows/kafka-server-stop.bat deleted file mode 100644 index 676577cf958645fde5ec95cb521f7e02f1616099..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-server-stop.bat +++ /dev/null @@ -1,18 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete -rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-storage.bat b/src/alto/service/kafka_ale/bin/windows/kafka-storage.bat deleted file mode 100644 index 4a0e458a623b82bd6f2dea9ac1626e107aea7226..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-storage.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. 
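kafka-server-start.bat sizes the heap from the detected OS architecture (512M on 32-bit, 1G on 64-bit) and kafka-server-stop.bat terminates the broker through wmic. The corresponding .sh lifecycle, including the KRaft log-directory formatting done by the kafka-storage wrapper deleted just below, runs roughly as follows (KRaft config path per the upstream layout):

    # one-time formatting for a KRaft broker, then start it as a daemon
    CLUSTER_ID=$(bin/kafka-storage.sh random-uuid)
    bin/kafka-storage.sh format -t "$CLUSTER_ID" -c config/kraft/server.properties
    bin/kafka-server-start.sh -daemon config/kraft/server.properties
    bin/kafka-server-stop.sh    # signals the kafka.Kafka process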
- -"%~dp0kafka-run-class.bat" kafka.tools.StorageTool %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-streams-application-reset.bat b/src/alto/service/kafka_ale/bin/windows/kafka-streams-application-reset.bat deleted file mode 100644 index 1cfb6f518c824e0e77db2272ef65ca0d49662e21..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-streams-application-reset.bat +++ /dev/null @@ -1,23 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -SetLocal -IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( - set KAFKA_HEAP_OPTS=-Xmx512M -) - -"%~dp0kafka-run-class.bat" kafka.tools.StreamsResetter %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-topics.bat b/src/alto/service/kafka_ale/bin/windows/kafka-topics.bat deleted file mode 100644 index 677b09d077d99691b8b1f0197f2e4a3676e4b27b..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-topics.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/kafka-transactions.bat b/src/alto/service/kafka_ale/bin/windows/kafka-transactions.bat deleted file mode 100644 index 9bb7585fca9da8751816cc8dde2594e26e88db38..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/kafka-transactions.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. 
You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -"%~dp0kafka-run-class.bat" org.apache.kafka.tools.TransactionsCommand %* diff --git a/src/alto/service/kafka_ale/bin/windows/zookeeper-server-start.bat b/src/alto/service/kafka_ale/bin/windows/zookeeper-server-start.bat deleted file mode 100644 index f201a585135d2db55a15d8e45004feb6a536fe7f..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/zookeeper-server-start.bat +++ /dev/null @@ -1,30 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -IF [%1] EQU [] ( - echo USAGE: %0 zookeeper.properties - EXIT /B 1 -) - -SetLocal -IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( - set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties -) -IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( - set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M -) -"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %* -EndLocal diff --git a/src/alto/service/kafka_ale/bin/windows/zookeeper-server-stop.bat b/src/alto/service/kafka_ale/bin/windows/zookeeper-server-stop.bat deleted file mode 100644 index 8b57dd8d63069ef579e7f41a60bba71f1018e29d..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/zookeeper-server-stop.bat +++ /dev/null @@ -1,17 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. 
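The ZooKeeper wrappers follow the same template: the start script fills in log4j and heap defaults before launching QuorumPeerMain, and the stop script locates and kills the process. With the .sh equivalents, a quick lifecycle plus a shell sanity check (the znode path is illustrative):

    bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
    bin/zookeeper-shell.sh localhost:2181 ls /brokers/ids   # brokers registered in ZK
    bin/zookeeper-server-stop.sh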
- -wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete diff --git a/src/alto/service/kafka_ale/bin/windows/zookeeper-shell.bat b/src/alto/service/kafka_ale/bin/windows/zookeeper-shell.bat deleted file mode 100644 index f1c86c430c1709744a0f31acbc70706dc27cfbec..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/windows/zookeeper-shell.bat +++ /dev/null @@ -1,22 +0,0 @@ -@echo off -rem Licensed to the Apache Software Foundation (ASF) under one or more -rem contributor license agreements. See the NOTICE file distributed with -rem this work for additional information regarding copyright ownership. -rem The ASF licenses this file to You under the Apache License, Version 2.0 -rem (the "License"); you may not use this file except in compliance with -rem the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, software -rem distributed under the License is distributed on an "AS IS" BASIS, -rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -rem See the License for the specific language governing permissions and -rem limitations under the License. - -IF [%1] EQU [] ( - echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...] - EXIT /B 1 -) - -"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %* diff --git a/src/alto/service/kafka_ale/bin/zookeeper-security-migration.sh b/src/alto/service/kafka_ale/bin/zookeeper-security-migration.sh deleted file mode 100644 index 722bde7cc4c621cf8f7f7279397f7776fb66eff9..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/zookeeper-security-migration.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@" diff --git a/src/alto/service/kafka_ale/bin/zookeeper-server-start.sh b/src/alto/service/kafka_ale/bin/zookeeper-server-start.sh deleted file mode 100644 index bd9c1142817c082b19b1c71cc39fb0149ed7dba1..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/zookeeper-server-start.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 [-daemon] zookeeper.properties" - exit 1 -fi -base_dir=$(dirname $0) - -if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" -fi - -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M" -fi - -EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'} - -COMMAND=$1 -case $COMMAND in - -daemon) - EXTRA_ARGS="-daemon "$EXTRA_ARGS - shift - ;; - *) - ;; -esac - -exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@" diff --git a/src/alto/service/kafka_ale/bin/zookeeper-server-stop.sh b/src/alto/service/kafka_ale/bin/zookeeper-server-stop.sh deleted file mode 100644 index 11665f32707f872698de907ea3580db83d427d14..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/zookeeper-server-stop.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -SIGNAL=${SIGNAL:-TERM} - -OSNAME=$(uname -s) -if [[ "$OSNAME" == "OS/390" ]]; then - if [ -z $JOBNAME ]; then - JOBNAME="ZKEESTRT" - fi - PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}') -elif [[ "$OSNAME" == "OS400" ]]; then - PIDS=$(ps -Af | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $2}') -else - PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}') -fi - -if [ -z "$PIDS" ]; then - echo "No zookeeper server to stop" - exit 1 -else - kill -s $SIGNAL $PIDS -fi diff --git a/src/alto/service/kafka_ale/bin/zookeeper-shell.sh b/src/alto/service/kafka_ale/bin/zookeeper-shell.sh deleted file mode 100644 index 2f1d0f2c61670a56f89cb62c52a1a3373fafeab1..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/bin/zookeeper-shell.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [ $# -lt 1 ]; -then - echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]" - exit 1 -fi - -exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@" diff --git a/src/alto/service/kafka_ale/config/connect-console-sink.properties b/src/alto/service/kafka_ale/config/connect-console-sink.properties deleted file mode 100644 index e240a8f0dd8dd41b22c77fc80671e6ece995dba4..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-console-sink.properties +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name=local-console-sink -connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector -tasks.max=1 -topics=connect-test \ No newline at end of file diff --git a/src/alto/service/kafka_ale/config/connect-console-source.properties b/src/alto/service/kafka_ale/config/connect-console-source.properties deleted file mode 100644 index d0e20690e7c6446c7a8e6de8d47b17148fb9a0cf..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-console-source.properties +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
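connect-console-sink.properties above, and the matching console source whose body follows, are the smallest possible connector configs: a name, a connector class, a task count and a topic. They are meant to be handed to the standalone worker, e.g.:

    # pipe stdin through the connect-test topic and back to stdout
    bin/connect-standalone.sh config/connect-standalone.properties \
        config/connect-console-source.properties config/connect-console-sink.properties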
- -name=local-console-source -connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector -tasks.max=1 -topic=connect-test \ No newline at end of file diff --git a/src/alto/service/kafka_ale/config/connect-distributed.properties b/src/alto/service/kafka_ale/config/connect-distributed.properties deleted file mode 100644 index cedad9a6823ef7ed6a93e139dd77dc9ac1b3648d..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-distributed.properties +++ /dev/null @@ -1,89 +0,0 @@ -## -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -## - -# This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended -# to be used with the examples, and some settings may differ from those used in a production system, especially -# the `bootstrap.servers` and those specifying replication factors. - -# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. -bootstrap.servers=localhost:9092 - -# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs -group.id=connect-cluster - -# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will -# need to configure these based on the format they want their data in when loaded from or stored into Kafka -key.converter=org.apache.kafka.connect.json.JsonConverter -value.converter=org.apache.kafka.connect.json.JsonConverter -# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply -# it to -key.converter.schemas.enable=true -value.converter.schemas.enable=true - -# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted. -# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create -# the topic before starting Kafka Connect if a specific topic configuration is needed. -# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. -# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able -# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. -offset.storage.topic=connect-offsets -offset.storage.replication.factor=1 -#offset.storage.partitions=25 - -# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated, -# and compacted topic. 
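The comments in this deleted connect-distributed.properties describe the distributed worker model: state lives in the compacted Kafka topics configured here (continued below), and connectors are managed over the REST API rather than via per-connector property files. A hedged sketch of that workflow, assuming the default REST port 8083 and that the FileStream connectors are on the worker's classpath:

    bin/connect-distributed.sh config/connect-distributed.properties &
    # submit the REST equivalent of connect-file-source.properties
    curl -s -X POST -H 'Content-Type: application/json' localhost:8083/connectors -d '{
      "name": "local-file-source",
      "config": {"connector.class": "FileStreamSource", "tasks.max": "1",
                 "file": "test.txt", "topic": "connect-test"}
    }'
    curl -s localhost:8083/connectors    # list running connectors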
Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create -# the topic before starting Kafka Connect if a specific topic configuration is needed. -# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. -# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able -# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. -config.storage.topic=connect-configs -config.storage.replication.factor=1 - -# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted. -# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create -# the topic before starting Kafka Connect if a specific topic configuration is needed. -# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. -# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able -# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. -status.storage.topic=connect-status -status.storage.replication.factor=1 -#status.storage.partitions=5 - -# Flush much faster than normal, which is useful for testing/debugging -offset.flush.interval.ms=10000 - -# List of comma-separated URIs the REST API will listen on. The supported protocols are HTTP and HTTPS. -# Specify hostname as 0.0.0.0 to bind to all interfaces. -# Leave hostname empty to bind to default interface. -# Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084" -#listeners=HTTP://:8083 - -# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers. -# If not set, it uses the value for "listeners" if configured. -#rest.advertised.host.name= -#rest.advertised.port= -#rest.advertised.listener= - -# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins -# (connectors, converters, transformations). The list should consist of top level directories that include -# any combination of: -# a) directories immediately containing jars with plugins and their dependencies -# b) uber-jars with plugins and their dependencies -# c) directories immediately containing the package directory structure of classes of plugins and their dependencies -# Examples: -# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, -#plugin.path= diff --git a/src/alto/service/kafka_ale/config/connect-file-sink.properties b/src/alto/service/kafka_ale/config/connect-file-sink.properties deleted file mode 100644 index 594ccc6e953c5494a9ac8958b848a267e1631dd8..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-file-sink.properties +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name=local-file-sink -connector.class=FileStreamSink -tasks.max=1 -file=test.sink.txt -topics=connect-test \ No newline at end of file diff --git a/src/alto/service/kafka_ale/config/connect-file-source.properties b/src/alto/service/kafka_ale/config/connect-file-source.properties deleted file mode 100644 index 599cf4cb2ac79e7c015f6e7b78fd8eb037cb2091..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-file-source.properties +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name=local-file-source -connector.class=FileStreamSource -tasks.max=1 -file=test.txt -topic=connect-test \ No newline at end of file diff --git a/src/alto/service/kafka_ale/config/connect-log4j.properties b/src/alto/service/kafka_ale/config/connect-log4j.properties deleted file mode 100644 index 157d5931b6d5927cf8a711ea16d6d624c5965cc1..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-log4j.properties +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -log4j.rootLogger=INFO, stdout, connectAppender - -# Send the logs to the console. -# -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout - -# Send the logs to a file, rolling the file at midnight local time. For example, the `File` option specifies the -# location of the log files (e.g. ${kafka.logs.dir}/connect.log), and at midnight local time the file is closed -# and copied in the same directory but with a filename that ends in the `DatePattern` option. 
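(Tying the file source/sink demo above together: once the `local-file-source` connector runs, each line appended to `test.txt` becomes a record on the `connect-test` topic, and the sink connector writes it back out to `test.sink.txt`. A minimal kafka-python sketch to read the intermediate topic directly, assuming a broker on localhost:9092:)

```python
# Read back what the FileStreamSource connector published to `connect-test`.
# Illustrative sketch; assumes a local broker on localhost:9092.
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "connect-test",
    bootstrap_servers="localhost:9092",
    auto_offset_reset="earliest",  # start from the oldest retained record
    consumer_timeout_ms=5000,      # stop iterating after 5s with no records
)
for record in consumer:
    print(record.offset, record.value.decode("utf-8"))
consumer.close()
```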
-# -log4j.appender.connectAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.connectAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log -log4j.appender.connectAppender.layout=org.apache.log4j.PatternLayout - -# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information -# in the log messages, where appropriate. This makes it easier to identify those log messages that apply to a -# specific connector. -# -connect.log.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n - -log4j.appender.stdout.layout.ConversionPattern=${connect.log.pattern} -log4j.appender.connectAppender.layout.ConversionPattern=${connect.log.pattern} - -log4j.logger.org.apache.zookeeper=ERROR -log4j.logger.org.reflections=ERROR diff --git a/src/alto/service/kafka_ale/config/connect-mirror-maker.properties b/src/alto/service/kafka_ale/config/connect-mirror-maker.properties deleted file mode 100644 index 40afda5e4ad68d3c76345d63b4b5bb1d4a4bf301..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-mirror-maker.properties +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see org.apache.kafka.clients.consumer.ConsumerConfig for more details - -# Sample MirrorMaker 2.0 top-level configuration file -# Run with ./bin/connect-mirror-maker.sh connect-mirror-maker.properties - -# specify any number of cluster aliases -clusters = A, B - -# connection information for each cluster -# This is a comma separated host:port pairs for each cluster -# for e.g. "A_host1:9092, A_host2:9092, A_host3:9092" -A.bootstrap.servers = A_host1:9092, A_host2:9092, A_host3:9092 -B.bootstrap.servers = B_host1:9092, B_host2:9092, B_host3:9092 - -# enable and configure individual replication flows -A->B.enabled = true - -# regex which defines which topics get replicated. For eg "foo-.*" -A->B.topics = .* - -B->A.enabled = true -B->A.topics = .* - -# Setting replication factor of newly created remote topics -replication.factor=1 - -############################# Internal Topic Settings ############################# -# The replication factor for mm2 internal topics "heartbeats", "B.checkpoints.internal" and -# "mm2-offset-syncs.B.internal" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -checkpoints.topic.replication.factor=1 -heartbeats.topic.replication.factor=1 -offset-syncs.topic.replication.factor=1 - -# The replication factor for connect internal topics "mm2-configs.B.internal", "mm2-offsets.B.internal" and -# "mm2-status.B.internal" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
-offset.storage.replication.factor=1 -status.storage.replication.factor=1 -config.storage.replication.factor=1 - -# customize as needed -# replication.policy.separator = _ -# sync.topic.acls.enabled = false -# emit.heartbeats.interval.seconds = 5 diff --git a/src/alto/service/kafka_ale/config/connect-standalone.properties b/src/alto/service/kafka_ale/config/connect-standalone.properties deleted file mode 100644 index 24f8e07611e34ad739523d5f6ab2d014716a899c..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/connect-standalone.properties +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# These are defaults. This file just demonstrates how to override some settings. -bootstrap.servers=localhost:9092 - -# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will -# need to configure these based on the format they want their data in when loaded from or stored into Kafka -key.converter=org.apache.kafka.connect.json.JsonConverter -value.converter=org.apache.kafka.connect.json.JsonConverter -# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply -# it to -key.converter.schemas.enable=true -value.converter.schemas.enable=true - -offset.storage.file.filename=/tmp/connect.offsets -# Flush much faster than normal, which is useful for testing/debugging -offset.flush.interval.ms=10000 - -# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins -# (connectors, converters, transformations). The list should consist of top level directories that include -# any combination of: -# a) directories immediately containing jars with plugins and their dependencies -# b) uber-jars with plugins and their dependencies -# c) directories immediately containing the package directory structure of classes of plugins and their dependencies -# Note: symlinks will be followed to discover dependencies or plugins. -# Examples: -# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, -plugin.path=libs/connect-file-3.3.1.jar diff --git a/src/alto/service/kafka_ale/config/consumer.properties b/src/alto/service/kafka_ale/config/consumer.properties deleted file mode 100644 index 01bb12eb0899f43945307afeba09e6b96013defa..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/consumer.properties +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see org.apache.kafka.clients.consumer.ConsumerConfig for more details - -# list of brokers used for bootstrapping knowledge about the rest of the cluster -# format: host1:port1,host2:port2 ... -bootstrap.servers=localhost:9092 - -# consumer group id -group.id=test-consumer-group - -# What to do when there is no initial offset in Kafka or if the current -# offset does not exist any more on the server: latest, earliest, none -#auto.offset.reset= diff --git a/src/alto/service/kafka_ale/config/kraft/README.md b/src/alto/service/kafka_ale/config/kraft/README.md deleted file mode 100644 index c850655133302625f120b531543788f69cf182e7..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/kraft/README.md +++ /dev/null @@ -1,168 +0,0 @@ -KRaft (aka KIP-500) mode -========================================================= - -# Introduction -It is now possible to run Apache Kafka without Apache ZooKeeper! We call this the [Kafka Raft metadata mode](https://cwiki.apache.org/confluence/display/KAFKA/KIP-500%3A+Replace+ZooKeeper+with+a+Self-Managed+Metadata+Quorum), typically shortened to `KRaft mode`. -`KRaft` is intended to be pronounced like `craft` (as in `craftsmanship`). - -When the Kafka cluster is in KRaft mode, it does not store its metadata in ZooKeeper. In fact, you do not have to run ZooKeeper at all, because it stores its metadata in a KRaft quorum of controller nodes. - -KRaft mode has many benefits -- some obvious, and some not so obvious. Clearly, it is nice to manage and configure one service rather than two services. In addition, you can now run a single process Kafka cluster. -Most important of all, KRaft mode is more scalable. We expect to be able to [support many more topics and partitions](https://www.confluent.io/kafka-summit-san-francisco-2019/kafka-needs-no-keeper/) in this mode. - -# Quickstart - -## Generate a cluster ID -The first step is to generate an ID for your new cluster, using the kafka-storage tool: - -~~~~ -$ ./bin/kafka-storage.sh random-uuid -xtzWWN4bTjitpL3kfd9s5g -~~~~ - -## Format Storage Directories -The next step is to format your storage directories. If you are running in single-node mode, you can do this with one command: - -~~~~ -$ ./bin/kafka-storage.sh format -t <uuid> -c ./config/kraft/server.properties -Formatting /tmp/kraft-combined-logs -~~~~ - -If you are using multiple nodes, then you should run the format command on each node. Be sure to use the same cluster ID for each one. - -This example configures the node as both a broker and controller (i.e. `process.roles=broker,controller`). It is also possible to run the broker and controller nodes separately. -Please see [here](https://github.com/apache/kafka/blob/trunk/config/kraft/broker.properties) and [here](https://github.com/apache/kafka/blob/trunk/config/kraft/controller.properties) for example configurations. - -## Start the Kafka Server -Finally, you are ready to start the Kafka server on each node. 
- -~~~~ -$ ./bin/kafka-server-start.sh ./config/kraft/server.properties -[2021-02-26 15:37:11,071] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) -[2021-02-26 15:37:11,294] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) -[2021-02-26 15:37:11,466] INFO [Log partition=__cluster_metadata-0, dir=/tmp/kraft-combined-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) -[2021-02-26 15:37:11,509] INFO [raft-expiration-reaper]: Starting (kafka.raft.TimingWheelExpirationService$ExpiredOperationReaper) -[2021-02-26 15:37:11,640] INFO [RaftManager nodeId=1] Completed transition to Unattached(epoch=0, voters=[1], electionTimeoutMs=9037) (org.apache.kafka.raft.QuorumState) -... -~~~~ - -Just like with a ZooKeeper based broker, you can connect to port 9092 (or whatever port you configured) to perform administrative operations or produce or consume data. - -~~~~ -$ ./bin/kafka-topics.sh --create --topic foo --partitions 1 --replication-factor 1 --bootstrap-server localhost:9092 -Created topic foo. -~~~~ - -# Deployment - -## Controller Servers -In KRaft mode, only a small group of specially selected servers can act as controllers (unlike the ZooKeeper-based mode, where any server can become the -Controller). The specially selected controller servers will participate in the metadata quorum. Each controller server is either active, or a hot -standby for the current active controller server. - -You will typically select 3 or 5 servers for this role, depending on factors like cost and the number of concurrent failures your system should withstand -without availability impact. Just like with ZooKeeper, you must keep a majority of the controllers alive in order to maintain availability. So if you have 3 -controllers, you can tolerate 1 failure; with 5 controllers, you can tolerate 2 failures. - -## Process Roles -Each Kafka server now has a new configuration key called `process.roles` which can have the following values: - -* If `process.roles` is set to `broker`, the server acts as a broker in KRaft mode. -* If `process.roles` is set to `controller`, the server acts as a controller in KRaft mode. -* If `process.roles` is set to `broker,controller`, the server acts as both a broker and a controller in KRaft mode. -* If `process.roles` is not set at all then we are assumed to be in ZooKeeper mode. As mentioned earlier, you can't currently transition back and forth between ZooKeeper mode and KRaft mode without reformatting. - -Nodes that act as both brokers and controllers are referred to as "combined" nodes. Combined nodes are simpler to operate for simple use cases and allow you to avoid -some fixed memory overheads associated with JVMs. The key disadvantage is that the controller will be less isolated from the rest of the system. For example, if activity on the broker causes an out of -memory condition, the controller part of the server is not isolated from that OOM condition. - -## Quorum Voters -All nodes in the system must set the `controller.quorum.voters` configuration. This identifies the quorum controller servers that should be used. All the controllers must be enumerated. -This is similar to how, when using ZooKeeper, the `zookeeper.connect` configuration must contain all the ZooKeeper servers. Unlike with the ZooKeeper config, however, `controller.quorum.voters` -also has IDs for each node. 
The format is id1@host1:port1,id2@host2:port2, etc. - -So if you have 10 brokers and 3 controllers named controller1, controller2, controller3, you might have the following configuration on controller1: -``` -process.roles=controller -node.id=1 -listeners=CONTROLLER://controller1.example.com:9093 -controller.quorum.voters=1@controller1.example.com:9093,2@controller2.example.com:9093,3@controller3.example.com:9093 -``` - -Each broker and each controller must set `controller.quorum.voters`. Note that the node ID supplied in the `controller.quorum.voters` configuration must match that supplied to the server. -So on controller1, node.id must be set to 1, and so forth. Note that there is no requirement for controller IDs to start at 0 or 1. However, the easiest and least confusing way to allocate -node IDs is probably just to give each server a numeric ID, starting from 0. Also note that each node ID must be unique across all the nodes in a particular cluster; no two nodes can have the same node ID regardless of their `process.roles` values. - -Note that clients never need to configure `controller.quorum.voters`; only servers do. - -## Kafka Storage Tool -As described above in the QuickStart section, you must use the `kafka-storage.sh` tool to generate a cluster ID for your new cluster, and then run the format command on each node before starting the node. - -This is different from how Kafka has operated in the past. Previously, Kafka would format blank storage directories automatically, and also generate a new cluster UUID automatically. One reason for the change -is that auto-formatting can sometimes obscure an error condition. For example, under UNIX, if a data directory can't be mounted, it may show up as blank. In this case, auto-formatting would be the wrong thing to do. - -This is particularly important for the metadata log maintained by the controller servers. If two controllers out of three controllers were able to start with blank logs, a leader might be able to be elected with -nothing in the log, which would cause all metadata to be lost. - -# Missing Features - -The following features have not yet been fully implemented: - -* Configuring SCRAM users via the administrative API -* Supporting JBOD configurations with multiple storage directories -* Modifying certain dynamic configurations on the standalone KRaft controller -* Delegation tokens -* Upgrade from ZooKeeper mode - -# Debugging -If you encounter an issue, you might want to take a look at the metadata log. 
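(Returning briefly to the `controller.quorum.voters` format described above: it is mechanical enough to sanity-check in a few lines. The helper below is purely illustrative and not part of Kafka or this repository:)

```python
# Parse a controller.quorum.voters string ("id1@host1:port1,id2@host2:port2,...")
# into (node_id, host, port) tuples. Hypothetical helper for illustration only.
def parse_quorum_voters(voters):
    parsed = []
    for voter in voters.split(","):
        node_id, _, endpoint = voter.strip().partition("@")
        host, _, port = endpoint.rpartition(":")
        parsed.append((int(node_id), host, int(port)))
    return parsed

print(parse_quorum_voters(
    "1@controller1.example.com:9093,"
    "2@controller2.example.com:9093,"
    "3@controller3.example.com:9093"
))
# -> [(1, 'controller1.example.com', 9093), (2, 'controller2.example.com', 9093),
#     (3, 'controller3.example.com', 9093)]
```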
- -## kafka-dump-log -One way to view the metadata log is with kafka-dump-log.sh tool, like so: - -~~~~ -$ ./bin/kafka-dump-log.sh --cluster-metadata-decoder --skip-record-metadata --files /tmp/kraft-combined-logs/__cluster_metadata-0/*.log -Dumping /tmp/kraft-combined-logs/__cluster_metadata-0/00000000000000000000.log -Starting offset: 0 -baseOffset: 0 lastOffset: 0 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: true position: 0 CreateTime: 1614382631640 size: 89 magic: 2 compresscodec: NONE crc: 1438115474 isvalid: true - -baseOffset: 1 lastOffset: 1 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 89 CreateTime: 1614382632329 size: 137 magic: 2 compresscodec: NONE crc: 1095855865 isvalid: true - payload: {"type":"REGISTER_BROKER_RECORD","version":0,"data":{"brokerId":1,"incarnationId":"P3UFsWoNR-erL9PK98YLsA","brokerEpoch":0,"endPoints":[{"name":"PLAINTEXT","host":"localhost","port":9092,"securityProtocol":0}],"features":[],"rack":null}} -baseOffset: 2 lastOffset: 2 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 226 CreateTime: 1614382632453 size: 83 magic: 2 compresscodec: NONE crc: 455187130 isvalid: true - payload: {"type":"UNFENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":0}} -baseOffset: 3 lastOffset: 3 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 309 CreateTime: 1614382634484 size: 83 magic: 2 compresscodec: NONE crc: 4055692847 isvalid: true - payload: {"type":"FENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":0}} -baseOffset: 4 lastOffset: 4 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: true position: 392 CreateTime: 1614382671857 size: 89 magic: 2 compresscodec: NONE crc: 1318571838 isvalid: true - -baseOffset: 5 lastOffset: 5 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 481 CreateTime: 1614382672440 size: 137 magic: 2 compresscodec: NONE crc: 841144615 isvalid: true - payload: {"type":"REGISTER_BROKER_RECORD","version":0,"data":{"brokerId":1,"incarnationId":"RXRJu7cnScKRZOnWQGs86g","brokerEpoch":4,"endPoints":[{"name":"PLAINTEXT","host":"localhost","port":9092,"securityProtocol":0}],"features":[],"rack":null}} -baseOffset: 6 lastOffset: 6 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 618 CreateTime: 1614382672544 size: 83 magic: 2 compresscodec: NONE crc: 4155905922 isvalid: true - payload: {"type":"UNFENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":4}} -baseOffset: 7 lastOffset: 8 count: 2 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 701 CreateTime: 1614382712158 size: 159 magic: 2 compresscodec: NONE crc: 3726758683 isvalid: true - payload: {"type":"TOPIC_RECORD","version":0,"data":{"name":"foo","topicId":"5zoAlv-xEh9xRANKXt1Lbg"}} - payload: 
{"type":"PARTITION_RECORD","version":0,"data":{"partitionId":0,"topicId":"5zoAlv-xEh9xRANKXt1Lbg","replicas":[1],"isr":[1],"removingReplicas":null,"addingReplicas":null,"leader":1,"leaderEpoch":0,"partitionEpoch":0}} -~~~~ - -## The Metadata Shell -Another tool for examining the metadata logs is the Kafka metadata shell. Just like the ZooKeeper shell, this allows you to inspect the metadata of the cluster. - -~~~~ -$ ./bin/kafka-metadata-shell.sh --snapshot /tmp/kraft-combined-logs/__cluster_metadata-0/00000000000000000000.log ->> ls / -brokers local metadataQuorum topicIds topics ->> ls /topics -foo ->> cat /topics/foo/0/data -{ - "partitionId" : 0, - "topicId" : "5zoAlv-xEh9xRANKXt1Lbg", - "replicas" : [ 1 ], - "isr" : [ 1 ], - "removingReplicas" : null, - "addingReplicas" : null, - "leader" : 1, - "leaderEpoch" : 0, - "partitionEpoch" : 0 -} ->> exit -~~~~ diff --git a/src/alto/service/kafka_ale/config/kraft/broker.properties b/src/alto/service/kafka_ale/config/kraft/broker.properties deleted file mode 100644 index 4edcc126d65f710a608c1c7118c5cd7dd2118587..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/kraft/broker.properties +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. Setting this puts us in KRaft mode -process.roles=broker - -# The node id associated with this instance's roles -node.id=2 - -# The connect string for the controller quorum -controller.quorum.voters=1@localhost:9093 - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. If not configured, the host name will be equal to the value of -# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -listeners=PLAINTEXT://localhost:9092 - -# Name of listener used for communication between brokers. -inter.broker.listener.name=PLAINTEXT - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=PLAINTEXT://localhost:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# This is required if running in KRaft mode. On a node with `process.roles=broker`, only the first listed listener will be used by the broker. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/tmp/kraft-broker-logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. 
Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 diff --git a/src/alto/service/kafka_ale/config/kraft/controller.properties b/src/alto/service/kafka_ale/config/kraft/controller.properties deleted file mode 100644 index 9e8ad62054e2959e81705a83843e7ca5deeb8a62..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/kraft/controller.properties +++ /dev/null @@ -1,122 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. Setting this puts us in KRaft mode -process.roles=controller - -# The node id associated with this instance's roles -node.id=1 - -# The connect string for the controller quorum -controller.quorum.voters=1@localhost:9093 - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Note that only the controller listeners are allowed here when `process.roles=controller`, and this listener should be consistent with `controller.quorum.voters` value. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -listeners=CONTROLLER://:9093 - -# A comma-separated list of the names of the listeners used by the controller. -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/tmp/kraft-controller-logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. 
Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 diff --git a/src/alto/service/kafka_ale/config/kraft/server.properties b/src/alto/service/kafka_ale/config/kraft/server.properties deleted file mode 100644 index ea84818b0c54fb27ea47abb4d52bcedc6c9d9745..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/kraft/server.properties +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. Setting this puts us in KRaft mode -process.roles=broker,controller - -# The node id associated with this instance's roles -node.id=1 - -# The connect string for the controller quorum -controller.quorum.voters=1@localhost:9093 - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. -# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), -# with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -listeners=PLAINTEXT://:9092,CONTROLLER://:9093 - -# Name of listener used for communication between brokers. -inter.broker.listener.name=PLAINTEXT - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=PLAINTEXT://localhost:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/tmp/kraft-combined-logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. 
Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 diff --git a/src/alto/service/kafka_ale/config/log4j.properties b/src/alto/service/kafka_ale/config/log4j.properties deleted file mode 100644 index 4cbce9d104291fe0a8c6d6acb7e6fc67dde4ecc5..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/log4j.properties +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Unspecified loggers and loggers with additivity=true output to server.log and stdout -# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise -log4j.rootLogger=INFO, stdout, kafkaAppender - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log -log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log -log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log -log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log -log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log 
-log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender -log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH -log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log -log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout -log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n - -# Change the line below to adjust ZK client logging -log4j.logger.org.apache.zookeeper=INFO - -# Change the two lines below to adjust the general broker logging level (output to server.log and stdout) -log4j.logger.kafka=INFO -log4j.logger.org.apache.kafka=INFO - -# Change to DEBUG or TRACE to enable request logging -log4j.logger.kafka.request.logger=WARN, requestAppender -log4j.additivity.kafka.request.logger=false - -# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output -# related to the handling of requests -#log4j.logger.kafka.network.Processor=TRACE, requestAppender -#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender -#log4j.additivity.kafka.server.KafkaApis=false -log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender -log4j.additivity.kafka.network.RequestChannel$=false - -log4j.logger.kafka.controller=TRACE, controllerAppender -log4j.additivity.kafka.controller=false - -log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender -log4j.additivity.kafka.log.LogCleaner=false - -log4j.logger.state.change.logger=INFO, stateChangeAppender -log4j.additivity.state.change.logger=false - -# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses -log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender -log4j.additivity.kafka.authorizer.logger=false - diff --git a/src/alto/service/kafka_ale/config/producer.properties b/src/alto/service/kafka_ale/config/producer.properties deleted file mode 100644 index 3a999e7c17e8cc17ee9bc15885cd695e0eac7d90..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/producer.properties +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# see org.apache.kafka.clients.producer.ProducerConfig for more details - -############################# Producer Basics ############################# - -# list of brokers used for bootstrapping knowledge about the rest of the cluster -# format: host1:port1,host2:port2 ... 
-bootstrap.servers=localhost:9092 - -# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd -compression.type=none - -# name of the partitioner class for partitioning records; -# The default uses "sticky" partitioning logic which spreads the load evenly between partitions, but improves throughput by attempting to fill the batches sent to each partition. -#partitioner.class= - -# the maximum amount of time the client will wait for the response of a request -#request.timeout.ms= - -# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for -#max.block.ms= - -# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together -#linger.ms= - -# the maximum size of a request in bytes -#max.request.size= - -# the default batch size in bytes when batching multiple records sent to a partition -#batch.size= - -# the total bytes of memory the producer can use to buffer records waiting to be sent to the server -#buffer.memory= diff --git a/src/alto/service/kafka_ale/config/server.properties b/src/alto/service/kafka_ale/config/server.properties deleted file mode 100644 index ba26e80c6d94d298a6b9a89172b647c4207bbd5e..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/server.properties +++ /dev/null @@ -1,138 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required. -# See kafka.server.KafkaConfig for additional details and defaults -# - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=0 - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. If not configured, the host name will be equal to the value of -# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -#listeners=SSL://localhost:9093 - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -#advertised.listeners=PLAINTEXT://your.host.name:9092 - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/tmp/kafka-logs - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -log.flush.interval.messages=100 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. 
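(The retention rule spelled out in the comments above — a segment becomes deletable when either the age criterion or the size criterion is met — can be sketched as follows. This is an illustration of the documented policy under assumed inputs, not Kafka's actual implementation; the commented-out size default continues below.)

```python
# Sketch of the documented retention rule: a closed segment is eligible for
# deletion when EITHER it is older than log.retention.hours OR the log's total
# size exceeds log.retention.bytes (when that limit is set). Illustrative only.
def segment_deletable(age_hours, log_size_bytes,
                      retention_hours=168, retention_bytes=None):
    too_old = age_hours > retention_hours
    too_big = retention_bytes is not None and log_size_bytes > retention_bytes
    return too_old or too_big

print(segment_deletable(age_hours=200, log_size_bytes=0))      # True: too old
print(segment_deletable(age_hours=1, log_size_bytes=2 * 1024**3,
                        retention_bytes=1073741824))           # True: too big
print(segment_deletable(age_hours=1, log_size_bytes=0))        # False
```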
-#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -#log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 - -############################# Zookeeper ############################# - -# Zookeeper connection string (see zookeeper docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:2181 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=18000 - - -############################# Group Coordinator Settings ############################# - -# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. -# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. -# The default value for this is 3 seconds. -# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. -# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. -group.initial.rebalance.delay.ms=0 diff --git a/src/alto/service/kafka_ale/config/tools-log4j.properties b/src/alto/service/kafka_ale/config/tools-log4j.properties deleted file mode 100644 index b19e343265fc3601423cfb08535a139639116375..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/tools-log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -log4j.rootLogger=WARN, stderr - -log4j.appender.stderr=org.apache.log4j.ConsoleAppender -log4j.appender.stderr.layout=org.apache.log4j.PatternLayout -log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n -log4j.appender.stderr.Target=System.err diff --git a/src/alto/service/kafka_ale/config/trogdor.conf b/src/alto/service/kafka_ale/config/trogdor.conf deleted file mode 100644 index 320cbe7560cd0c45f076de4801826424f9df3882..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/trogdor.conf +++ /dev/null @@ -1,25 +0,0 @@ -{ - "_comment": [ - "Licensed to the Apache Software Foundation (ASF) under one or more", - "contributor license agreements. 
See the NOTICE file distributed with", - "this work for additional information regarding copyright ownership.", - "The ASF licenses this file to You under the Apache License, Version 2.0", - "(the \"License\"); you may not use this file except in compliance with", - "the License. You may obtain a copy of the License at", - "", - "http://www.apache.org/licenses/LICENSE-2.0", - "", - "Unless required by applicable law or agreed to in writing, software", - "distributed under the License is distributed on an \"AS IS\" BASIS,", - "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", - "See the License for the specific language governing permissions and", - "limitations under the License." - ], - "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": { - "node0": { - "hostname": "localhost", - "trogdor.agent.port": 8888, - "trogdor.coordinator.port": 8889 - } - } -} diff --git a/src/alto/service/kafka_ale/config/zookeeper.properties b/src/alto/service/kafka_ale/config/zookeeper.properties deleted file mode 100644 index 90f4332ec31cf5e6612daa5fb75c4ac0d0093dd8..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/config/zookeeper.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# the directory where the snapshot is stored. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# disable the per-ip limit on the number of connections since this is a non-production config -maxClientCnxns=0 -# Disable the adminserver by default to avoid port conflicts. -# Set the port to something non-conflicting if choosing to enable this -admin.enableServer=false -# admin.serverPort=8080 diff --git a/src/alto/service/kafka_ale/kafka_api.py b/src/alto/service/kafka_ale/kafka_api.py deleted file mode 100644 index 4a61bdad92a96f8b2dc893f910bf2a1d41039c88..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/kafka_api.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 - -from kafka import KafkaProducer -from kafka import KafkaConsumer -import json -import os - - -class AltoProducer: - - def __init__(self, ip_k, port_k): - #self.producer = KafkaProducer(bootstrap_servers=ip_k + ':' + port_k, value_serializer=lambda v: json.dumps(v).encode('utf-8')) - self.producer = KafkaProducer(bootstrap_servers=ip_k + ':' + port_k) - self.metrics = {} - #print("Producer definition completed") - - def envio_alto(self, topic, msg, debug): - """ Performs the send and waits until the response arrives. - Sends msg to the topic queue of the server defined in the producer definition. 
- """ - try: - future = self.producer.send(topic, value=bytes(str(msg), 'utf-8')) - #future = self.producer.send('alto-costes', b'PRUEBAfinal') - result = future.get() - if debug: - print(result) - except Exception as e: - print(str(e)) - - def get_metrics(self): - """ Return the metrics to the API client. - """ - self.metrics = self.producer.metrics() - return self.metrics - - def envio_alto_archivo(self, topic, nfile, npath): - """Writes the nfile content in the queue defined by the topic. - """ - full_path = os.path.join(npath, nfile) - try: - #in_path = open(full_path, 'r') - in_path = open('/root/cost_map.json', 'r') - self.producer.send( topic, bytes(in_path.read(), 'utf-8') ) - return True - except Exception as e: - print(str(e)) - return False - - #Ampliable con serializaciones o con colas divididas en varios bloques. - - -class AltoConsumer: - def __init__(self, ip_k, port_k, topic): - comodin = ip_k + ":" + port_k - self.consumer = KafkaConsumer(topic, bootstrap_servers=comodin) - self.metrics = {} - - def recepcion_alto_total(self): - """ Reads each msg received by the consumer and returns a list of msgs. - """ - msgs = [] - for msg in self.consumer: - msgs.append(msg) - print(msgs) - - def recepcion_alto(self): - """ Returns the first unreaded msg received by the consumer """ - return next(self.consumer) - - def get_metrics(self): - """ Return the metrics to the API client. - """ - self.metrics = self.consumer.metrics() - return self.metrics - - #Ampliable con des-serializaciones o con distintos tipos de recepciones. - - diff --git a/src/alto/service/kafka_ale/launcher b/src/alto/service/kafka_ale/launcher deleted file mode 100644 index 9374d05f5f0bfff066a99b057e9abd148bdbfc42..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/launcher +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -bin/zookeeper-server-start.sh config/zookeeper.properties & #1>logs/launcher-ultima-actividad.log 2>>logs/launcher.log & -sleep 3 -bin/kafka-server-start.sh config/server.properties #1>>logs/launcher-ultima-actividad.log 2>>logs/launcher.log & diff --git a/src/alto/service/kafka_ale/libs/activation-1.1.1.jar b/src/alto/service/kafka_ale/libs/activation-1.1.1.jar deleted file mode 100644 index 1b703ab283e0cddabf9c1b5e28658f9198c0def4..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/activation-1.1.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/aopalliance-repackaged-2.6.1.jar b/src/alto/service/kafka_ale/libs/aopalliance-repackaged-2.6.1.jar deleted file mode 100644 index 35502f09dcaa845e9ad63fa1ea6f5989cf97731a..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/aopalliance-repackaged-2.6.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/argparse4j-0.7.0.jar b/src/alto/service/kafka_ale/libs/argparse4j-0.7.0.jar deleted file mode 100644 index b1865dd3827eada57d396d4fc1e012ff990c5f3b..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/argparse4j-0.7.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/audience-annotations-0.5.0.jar b/src/alto/service/kafka_ale/libs/audience-annotations-0.5.0.jar deleted file mode 100644 index 52491a7ea2048e9ab82534472a1c9fff7396d031..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/audience-annotations-0.5.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/commons-cli-1.4.jar 
b/src/alto/service/kafka_ale/libs/commons-cli-1.4.jar deleted file mode 100644 index 22deb3089e2f79a983406bd13a75a3e6238afdcf..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/commons-cli-1.4.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/commons-lang3-3.12.0.jar b/src/alto/service/kafka_ale/libs/commons-lang3-3.12.0.jar deleted file mode 100644 index 4d434a2a4554815584365348ea2cf00cdfe3d5f9..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/commons-lang3-3.12.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/commons-lang3-3.8.1.jar b/src/alto/service/kafka_ale/libs/commons-lang3-3.8.1.jar deleted file mode 100644 index 2c65ce67d5c2b746e0583e4879c35ed0751b505e..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/commons-lang3-3.8.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-api-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-api-3.3.1.jar deleted file mode 100644 index 956583a6abf965dd209a6457d09aba9c36a794c5..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-api-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-basic-auth-extension-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-basic-auth-extension-3.3.1.jar deleted file mode 100644 index fff510a098f40f8fcf48e1e15f245f15146885d7..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-basic-auth-extension-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-file-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-file-3.3.1.jar deleted file mode 100644 index 97f11a852a36b312dc02d38376e86bbd1ea5da24..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-file-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-json-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-json-3.3.1.jar deleted file mode 100644 index 45c46bd6a6e2a4f11d540503b253db102f566201..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-json-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-mirror-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-mirror-3.3.1.jar deleted file mode 100644 index d7c430457ce08ac60ddf9338b166d52ce267f3cc..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-mirror-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-mirror-client-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-mirror-client-3.3.1.jar deleted file mode 100644 index d15a9c03ec58a5f399efea58d02775e18d5bcb49..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-mirror-client-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-runtime-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-runtime-3.3.1.jar deleted file mode 100644 index d84a84ef4291f33abaa3f727a8a63ee9fd483d44..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-runtime-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/connect-transforms-3.3.1.jar b/src/alto/service/kafka_ale/libs/connect-transforms-3.3.1.jar deleted file mode 100644 index 
5045807f52f8a5abba720b02f53b5af3bfc2a454..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/connect-transforms-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/hk2-api-2.6.1.jar b/src/alto/service/kafka_ale/libs/hk2-api-2.6.1.jar deleted file mode 100644 index 03d6eb05921f28d7fdca0f7bf1af7b187cdf1a8b..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/hk2-api-2.6.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/hk2-locator-2.6.1.jar b/src/alto/service/kafka_ale/libs/hk2-locator-2.6.1.jar deleted file mode 100644 index 0906bd1bdaca955a54c37a2b70ed393d5aa9fcca..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/hk2-locator-2.6.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/hk2-utils-2.6.1.jar b/src/alto/service/kafka_ale/libs/hk2-utils-2.6.1.jar deleted file mode 100644 index 768bc48800fee6da4526f9b8cff0de1773a7bc3f..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/hk2-utils-2.6.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-annotations-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-annotations-2.13.3.jar deleted file mode 100644 index 60a3ddba037a4c833c7e46fc4d59bd6bd4f45bf9..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-annotations-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-core-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-core-2.13.3.jar deleted file mode 100644 index 7e2086d39c8010e751eec7dfb4bddafe1568d76e..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-core-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-databind-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-databind-2.13.3.jar deleted file mode 100644 index a27e9d7055f40269e0957ced089aba5f0660a726..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-databind-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-dataformat-csv-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-dataformat-csv-2.13.3.jar deleted file mode 100644 index 638b25611aed0434c69355b05b50f00a471d3ffd..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-dataformat-csv-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-datatype-jdk8-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-datatype-jdk8-2.13.3.jar deleted file mode 100644 index 6e6a5a9c9cee97a58924e471fc31cc79a59cc734..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-datatype-jdk8-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-jaxrs-base-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-jaxrs-base-2.13.3.jar deleted file mode 100644 index 9cf40b88b2790a75b433ec323fd07df10369cd9a..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-jaxrs-base-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-jaxrs-json-provider-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-jaxrs-json-provider-2.13.3.jar deleted file mode 100644 index a243cfd4832f5508544551ad0d3ae4e0c5222d2a..0000000000000000000000000000000000000000 Binary files 
a/src/alto/service/kafka_ale/libs/jackson-jaxrs-json-provider-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-module-jaxb-annotations-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-module-jaxb-annotations-2.13.3.jar deleted file mode 100644 index f1babb37f8cdc3d268671dacc17f9282159d9afd..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-module-jaxb-annotations-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jackson-module-scala_2.13-2.13.3.jar b/src/alto/service/kafka_ale/libs/jackson-module-scala_2.13-2.13.3.jar deleted file mode 100644 index b9d4ab092759bad78c79aa73a34e8ee09e49bc63..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jackson-module-scala_2.13-2.13.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jakarta.activation-api-1.2.2.jar b/src/alto/service/kafka_ale/libs/jakarta.activation-api-1.2.2.jar deleted file mode 100644 index 3cc969d8f741f472a7648b5fc75dc0253d682744..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jakarta.activation-api-1.2.2.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jakarta.annotation-api-1.3.5.jar b/src/alto/service/kafka_ale/libs/jakarta.annotation-api-1.3.5.jar deleted file mode 100644 index 606d992eb235afffc3284caf6a2f97939e8a1682..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jakarta.annotation-api-1.3.5.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jakarta.inject-2.6.1.jar b/src/alto/service/kafka_ale/libs/jakarta.inject-2.6.1.jar deleted file mode 100644 index cee6acd2a0a81094bb1b6f8b75f2c6913301ba6d..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jakarta.inject-2.6.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jakarta.validation-api-2.0.2.jar b/src/alto/service/kafka_ale/libs/jakarta.validation-api-2.0.2.jar deleted file mode 100644 index d68c9f7f36d2cfda66f0b89e0719b73b034143b5..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jakarta.validation-api-2.0.2.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jakarta.ws.rs-api-2.1.6.jar b/src/alto/service/kafka_ale/libs/jakarta.ws.rs-api-2.1.6.jar deleted file mode 100644 index 4850659bb6c9f2872836f4d0d5f4600da774d440..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jakarta.ws.rs-api-2.1.6.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jakarta.xml.bind-api-2.3.3.jar b/src/alto/service/kafka_ale/libs/jakarta.xml.bind-api-2.3.3.jar deleted file mode 100644 index b8c7dc1ec856c2d34a77ed43248ed82b9eec8e9c..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jakarta.xml.bind-api-2.3.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/javassist-3.27.0-GA.jar b/src/alto/service/kafka_ale/libs/javassist-3.27.0-GA.jar deleted file mode 100644 index 092e59b4d68c3d5832cef4e04324212fd319ab48..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/javassist-3.27.0-GA.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/javax.servlet-api-3.1.0.jar b/src/alto/service/kafka_ale/libs/javax.servlet-api-3.1.0.jar deleted file mode 100644 index 6b14c3d267867e76c04948bb31b3de18e01412ee..0000000000000000000000000000000000000000 
Binary files a/src/alto/service/kafka_ale/libs/javax.servlet-api-3.1.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/javax.ws.rs-api-2.1.1.jar b/src/alto/service/kafka_ale/libs/javax.ws.rs-api-2.1.1.jar deleted file mode 100644 index 3eabbf0874c4a56edf5f47fb9e26c2c125dc23b2..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/javax.ws.rs-api-2.1.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jaxb-api-2.3.0.jar b/src/alto/service/kafka_ale/libs/jaxb-api-2.3.0.jar deleted file mode 100644 index 0817c083ad9965d72e18c87dd23b63f8241adc50..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jaxb-api-2.3.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jersey-client-2.34.jar b/src/alto/service/kafka_ale/libs/jersey-client-2.34.jar deleted file mode 100644 index 16e921b8d5851400d3339a2cc7e9a44b0c6974e8..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jersey-client-2.34.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jersey-common-2.34.jar b/src/alto/service/kafka_ale/libs/jersey-common-2.34.jar deleted file mode 100644 index fccbd99f9d4b8ee964daf95146945dede170c184..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jersey-common-2.34.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jersey-container-servlet-2.34.jar b/src/alto/service/kafka_ale/libs/jersey-container-servlet-2.34.jar deleted file mode 100644 index 5571301739fe6f134c5cdeca722070cd8edae520..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jersey-container-servlet-2.34.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jersey-container-servlet-core-2.34.jar b/src/alto/service/kafka_ale/libs/jersey-container-servlet-core-2.34.jar deleted file mode 100644 index 6615bb986f8c03984a5ee5d35a4f8d7464698f3d..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jersey-container-servlet-core-2.34.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jersey-hk2-2.34.jar b/src/alto/service/kafka_ale/libs/jersey-hk2-2.34.jar deleted file mode 100644 index a5b453693be99ce12418f21c25dc8e58db4bc0bf..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jersey-hk2-2.34.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jersey-server-2.34.jar b/src/alto/service/kafka_ale/libs/jersey-server-2.34.jar deleted file mode 100644 index 34da9f44ef1c30c09f8ab3e513dae250fdd5e3a0..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jersey-server-2.34.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-client-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-client-9.4.48.v20220622.jar deleted file mode 100644 index ef71f14139e427b3fff39d0c148434a6b9fbd665..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-client-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-continuation-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-continuation-9.4.48.v20220622.jar deleted file mode 100644 index 56f4bfe44166553802d5284f05ad1d4cc6f7051e..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-continuation-9.4.48.v20220622.jar and /dev/null differ diff 
--git a/src/alto/service/kafka_ale/libs/jetty-http-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-http-9.4.48.v20220622.jar deleted file mode 100644 index 1d67b1ada5c5b4aaa7d0b7cca17fadd894c8dc01..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-http-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-io-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-io-9.4.48.v20220622.jar deleted file mode 100644 index d767b113fd3156baeb5e34e7f051c87c5604245a..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-io-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-security-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-security-9.4.48.v20220622.jar deleted file mode 100644 index c84855c4740570136b9c7d0a73cddb7206f12a55..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-security-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-server-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-server-9.4.48.v20220622.jar deleted file mode 100644 index 5261f0d3593bca6927d7b79780698893437a6ade..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-server-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-servlet-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-servlet-9.4.48.v20220622.jar deleted file mode 100644 index 684dc995956dfd38599907013664f3a0970bdd82..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-servlet-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-servlets-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-servlets-9.4.48.v20220622.jar deleted file mode 100644 index 0c030907de49de31ad496e1f75a2066024eabeb7..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-servlets-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-util-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-util-9.4.48.v20220622.jar deleted file mode 100644 index ddc49a8b09f5bd2b247f45daea8b2a2ab970e4be..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-util-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jetty-util-ajax-9.4.48.v20220622.jar b/src/alto/service/kafka_ale/libs/jetty-util-ajax-9.4.48.v20220622.jar deleted file mode 100644 index 7606be42a0722a804f1ad2f2e07c2dde17a850bc..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jetty-util-ajax-9.4.48.v20220622.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jline-3.21.0.jar b/src/alto/service/kafka_ale/libs/jline-3.21.0.jar deleted file mode 100644 index 293a111690c4934bca2b6e3be4be9faddf7a72fc..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jline-3.21.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/jopt-simple-5.0.4.jar b/src/alto/service/kafka_ale/libs/jopt-simple-5.0.4.jar deleted file mode 100644 index 317b2b069c413d1b58a471e7d49d85e03e4425d6..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jopt-simple-5.0.4.jar and /dev/null differ diff --git 
a/src/alto/service/kafka_ale/libs/jose4j-0.7.9.jar b/src/alto/service/kafka_ale/libs/jose4j-0.7.9.jar deleted file mode 100644 index 8f095a891a09435619452a01466e7ce31d9926b2..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/jose4j-0.7.9.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-clients-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-clients-3.3.1.jar deleted file mode 100644 index a5572e681a425abd0ef9fbaa27c860b0c9c166f8..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-clients-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-log4j-appender-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-log4j-appender-3.3.1.jar deleted file mode 100644 index 01913b43b6e1dfb347218010f55c6e0c1886a264..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-log4j-appender-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-metadata-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-metadata-3.3.1.jar deleted file mode 100644 index b31ed6916d12ef1636d16d5ad6a80ed893f4f6df..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-metadata-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-raft-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-raft-3.3.1.jar deleted file mode 100644 index a1e340141f03fe640e4045ca4510314c7b1fcb1f..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-raft-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-server-common-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-server-common-3.3.1.jar deleted file mode 100644 index bd3f1e91e23cae7028705a7906a3215a2d84c3a6..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-server-common-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-shell-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-shell-3.3.1.jar deleted file mode 100644 index 202226fb3a3b5b91e5810bfa0676672d0e86a002..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-shell-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-storage-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-storage-3.3.1.jar deleted file mode 100644 index 8e81841dfeb2930962c5b854fc4c263d82522c11..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-storage-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-storage-api-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-storage-api-3.3.1.jar deleted file mode 100644 index eb4ae22dc0b2215cba2f9ead9b1aa901cc6d73d5..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-storage-api-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-streams-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-streams-3.3.1.jar deleted file mode 100644 index c4292e55eb41383a81efb6d46fe495f955b5118a..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-streams-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-streams-examples-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-streams-examples-3.3.1.jar deleted file mode 100644 index 
d4627a213f9504d725e732d29420d675ae42d77e..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-streams-examples-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-streams-scala_2.13-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-streams-scala_2.13-3.3.1.jar deleted file mode 100644 index fbcb30bc76a0883b82959b2f6ca57fd87b715de5..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-streams-scala_2.13-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-streams-test-utils-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-streams-test-utils-3.3.1.jar deleted file mode 100644 index 6f1da9156d1210a4a65b8acfbebff55fa67d686f..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-streams-test-utils-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka-tools-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka-tools-3.3.1.jar deleted file mode 100644 index db1ab21bffde0d82ae2edc1479fe569743c402fa..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka-tools-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/kafka_2.13-3.3.1.jar b/src/alto/service/kafka_ale/libs/kafka_2.13-3.3.1.jar deleted file mode 100644 index 2d638dc34a36c563f16855f4f1d9d9f560835795..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/kafka_2.13-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/lz4-java-1.8.0.jar b/src/alto/service/kafka_ale/libs/lz4-java-1.8.0.jar deleted file mode 100644 index 89c644b8e286e9da107d81de25f1be0fe6447607..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/lz4-java-1.8.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/maven-artifact-3.8.4.jar b/src/alto/service/kafka_ale/libs/maven-artifact-3.8.4.jar deleted file mode 100644 index d59793e4da01c4efa9c564cec0aff8cf78340d88..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/maven-artifact-3.8.4.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/metrics-core-2.2.0.jar b/src/alto/service/kafka_ale/libs/metrics-core-2.2.0.jar deleted file mode 100644 index 0f6d1cb0ecbd37f673bc87e109cacc535e23350a..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/metrics-core-2.2.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/metrics-core-4.1.12.1.jar b/src/alto/service/kafka_ale/libs/metrics-core-4.1.12.1.jar deleted file mode 100644 index 94fc8346ca7e632c8c0016c1a914ae504bec0ce3..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/metrics-core-4.1.12.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-buffer-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-buffer-4.1.78.Final.jar deleted file mode 100644 index f7ac3866e1a220f1307029722262f7f92f670fc2..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-buffer-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-codec-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-codec-4.1.78.Final.jar deleted file mode 100644 index 80f12ec76994b87cdf42be690981b14dc56372ec..0000000000000000000000000000000000000000 Binary files 
a/src/alto/service/kafka_ale/libs/netty-codec-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-common-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-common-4.1.78.Final.jar deleted file mode 100644 index c909321becc530cb5ef0384af4338ec2433e0e1c..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-common-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-handler-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-handler-4.1.78.Final.jar deleted file mode 100644 index 500e2d021c9ca1f78b9f63a694ac211a6013e04b..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-handler-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-resolver-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-resolver-4.1.78.Final.jar deleted file mode 100644 index 7266786a1509801a2b60e9eefbe270912ba89435..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-resolver-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-transport-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-transport-4.1.78.Final.jar deleted file mode 100644 index dd3ca8bc4e6d9597bebc50ce591616bc4ffe35ca..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-transport-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-transport-classes-epoll-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-transport-classes-epoll-4.1.78.Final.jar deleted file mode 100644 index 4db23fb3dd8c5bed17e2ea721104bd6c5c84a322..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-transport-classes-epoll-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-transport-native-epoll-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-transport-native-epoll-4.1.78.Final.jar deleted file mode 100644 index 82d71d24bd155f040344e686e3c9ccc0db68da48..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-transport-native-epoll-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/netty-transport-native-unix-common-4.1.78.Final.jar b/src/alto/service/kafka_ale/libs/netty-transport-native-unix-common-4.1.78.Final.jar deleted file mode 100644 index e934559d481aaa0a9ab6c11fbf80db13211dedad..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/netty-transport-native-unix-common-4.1.78.Final.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/osgi-resource-locator-1.0.3.jar b/src/alto/service/kafka_ale/libs/osgi-resource-locator-1.0.3.jar deleted file mode 100644 index 0f3c38653fb6182b0833f554340dd0c7ca7c4093..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/osgi-resource-locator-1.0.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/paranamer-2.8.jar b/src/alto/service/kafka_ale/libs/paranamer-2.8.jar deleted file mode 100644 index 0bf659b93e74bb53003bc521f59c855d0ab8e692..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/paranamer-2.8.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/plexus-utils-3.3.0.jar b/src/alto/service/kafka_ale/libs/plexus-utils-3.3.0.jar deleted file mode 100644 index 
81053c226e6c39b6f1d9d6d9ba2f9a8931605850..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/plexus-utils-3.3.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/reflections-0.9.12.jar b/src/alto/service/kafka_ale/libs/reflections-0.9.12.jar deleted file mode 100644 index 0f176b914126b6893fa8ae34ce21b2d710f60cae..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/reflections-0.9.12.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/reload4j-1.2.19.jar b/src/alto/service/kafka_ale/libs/reload4j-1.2.19.jar deleted file mode 100644 index 84affb2956a8d62410196431f5059dfdd6252a83..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/reload4j-1.2.19.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/rocksdbjni-6.29.4.1.jar b/src/alto/service/kafka_ale/libs/rocksdbjni-6.29.4.1.jar deleted file mode 100644 index e5027343cea99064725f968ba4882776d497efd8..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/rocksdbjni-6.29.4.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/scala-collection-compat_2.13-2.6.0.jar b/src/alto/service/kafka_ale/libs/scala-collection-compat_2.13-2.6.0.jar deleted file mode 100644 index 48909cd8ee667a5e9e0697496e6ce437615b6409..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/scala-collection-compat_2.13-2.6.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/scala-java8-compat_2.13-1.0.2.jar b/src/alto/service/kafka_ale/libs/scala-java8-compat_2.13-1.0.2.jar deleted file mode 100644 index 11bc17ecac20edcb6b7b7be8a230f72ed7238583..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/scala-java8-compat_2.13-1.0.2.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/scala-library-2.13.8.jar b/src/alto/service/kafka_ale/libs/scala-library-2.13.8.jar deleted file mode 100644 index 5c42e211b68a1f82b53e6b0181b5bf16e6640ea8..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/scala-library-2.13.8.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/scala-logging_2.13-3.9.4.jar b/src/alto/service/kafka_ale/libs/scala-logging_2.13-3.9.4.jar deleted file mode 100644 index 107e74102b62f79bb8fd5a8fa1d52bc65bd4f50d..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/scala-logging_2.13-3.9.4.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/scala-reflect-2.13.8.jar b/src/alto/service/kafka_ale/libs/scala-reflect-2.13.8.jar deleted file mode 100644 index 7d1f3d4abc34a2a9b82659764ae3605499e0f47f..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/scala-reflect-2.13.8.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/slf4j-api-1.7.36.jar b/src/alto/service/kafka_ale/libs/slf4j-api-1.7.36.jar deleted file mode 100644 index 7d3ce68d25e8b2237c3711145bbbea2c166d4767..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/slf4j-api-1.7.36.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/slf4j-reload4j-1.7.36.jar b/src/alto/service/kafka_ale/libs/slf4j-reload4j-1.7.36.jar deleted file mode 100644 index b007cc766ce6a6f269731e97e4c482ae0c388599..0000000000000000000000000000000000000000 Binary files 
a/src/alto/service/kafka_ale/libs/slf4j-reload4j-1.7.36.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/snappy-java-1.1.8.4.jar b/src/alto/service/kafka_ale/libs/snappy-java-1.1.8.4.jar deleted file mode 100644 index aa5231eba279f09b190bd872811a37b8f581c038..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/snappy-java-1.1.8.4.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/swagger-annotations-2.2.0.jar b/src/alto/service/kafka_ale/libs/swagger-annotations-2.2.0.jar deleted file mode 100644 index 7e4e641ffb043f18978f469333f6f80ff54cfc98..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/swagger-annotations-2.2.0.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/trogdor-3.3.1.jar b/src/alto/service/kafka_ale/libs/trogdor-3.3.1.jar deleted file mode 100644 index 2ebadfcb2a962db0f7a03db7f5420fa6592a5049..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/trogdor-3.3.1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/zookeeper-3.6.3.jar b/src/alto/service/kafka_ale/libs/zookeeper-3.6.3.jar deleted file mode 100644 index 1c65199fb2a022a091d80c2f0747d5a9e57c2dc0..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/zookeeper-3.6.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/zookeeper-jute-3.6.3.jar b/src/alto/service/kafka_ale/libs/zookeeper-jute-3.6.3.jar deleted file mode 100644 index af5450efc373c680d8dfe7a4447a920cd3fdd076..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/zookeeper-jute-3.6.3.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/libs/zstd-jni-1.5.2-1.jar b/src/alto/service/kafka_ale/libs/zstd-jni-1.5.2-1.jar deleted file mode 100644 index 14b4ed7afd109521eadd4cd591031579a2c0a979..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/libs/zstd-jni-1.5.2-1.jar and /dev/null differ diff --git a/src/alto/service/kafka_ale/licenses/CDDL+GPL-1.1 b/src/alto/service/kafka_ale/licenses/CDDL+GPL-1.1 deleted file mode 100644 index 4b156e6e78b43d59b07391bda0e39b62ca2f4e29..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/CDDL+GPL-1.1 +++ /dev/null @@ -1,760 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. 
"Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. 
- However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California. 
- - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. -Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) 
Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. - -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. 
An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. - diff --git a/src/alto/service/kafka_ale/licenses/DWTFYWTPL b/src/alto/service/kafka_ale/licenses/DWTFYWTPL deleted file mode 100644 index 5a8e332545f667aab9bf3a17f11dba27c70b656a..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/DWTFYWTPL +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/src/alto/service/kafka_ale/licenses/argparse-MIT b/src/alto/service/kafka_ale/licenses/argparse-MIT deleted file mode 100644 index 773b0df0e35919182940e77905607e6219243028..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/argparse-MIT +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (C) 2011-2017 Tatsuhiro Tsujikawa - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, - * modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ diff --git a/src/alto/service/kafka_ale/licenses/classgraph-MIT b/src/alto/service/kafka_ale/licenses/classgraph-MIT deleted file mode 100644 index eddec36100b000d686bc6499b26211de428ae2e4..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/classgraph-MIT +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 Luke Hutchison - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/src/alto/service/kafka_ale/licenses/eclipse-distribution-license-1.0 b/src/alto/service/kafka_ale/licenses/eclipse-distribution-license-1.0
deleted file mode 100644
index 5f06513abf5fedd6b0d480ab36a2f941ba1b4545..0000000000000000000000000000000000000000
--- a/src/alto/service/kafka_ale/licenses/eclipse-distribution-license-1.0
+++ /dev/null
@@ -1,13 +0,0 @@
-Eclipse Distribution License - v 1.0
-
-Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-* Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/alto/service/kafka_ale/licenses/eclipse-public-license-2.0 b/src/alto/service/kafka_ale/licenses/eclipse-public-license-2.0
deleted file mode 100644
index c9f1425f82d02665b3ecd4d24b84e86437e7ea8a..0000000000000000000000000000000000000000
--- a/src/alto/service/kafka_ale/licenses/eclipse-public-license-2.0
+++ /dev/null
@@ -1,87 +0,0 @@
-Eclipse Public License - v 2.0
-
-THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE (“AGREEMENT”). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-1. DEFINITIONS
-
-“Contribution” means:
-
- a) in the case of the initial Contributor, the initial content Distributed under this Agreement, and
- b) in the case of each subsequent Contributor:
- i) changes to the Program, and
- ii) additions to the Program;
- where such changes and/or additions to the Program originate from and are Distributed by that particular Contributor. A Contribution “originates” from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include changes or additions to the Program that are not Modified Works.
-
-“Contributor” means any person or entity that Distributes the Program.
-
-“Licensed Patents” mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
-
-“Program” means the Contributions Distributed in accordance with this Agreement.
-
-“Recipient” means anyone who receives the Program under this Agreement or any Secondary License (as applicable), including Contributors.
-
-“Derivative Works” shall mean any work, whether in Source Code or other form, that is based on (or derived from) the Program and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship.
-
-“Modified Works” shall mean any work in Source Code or other form that results from an addition to, deletion from, or modification of the contents of the Program, including, for purposes of clarity any new file in Source Code form that contains any contents of the Program. Modified Works shall not include works that contain only declarations, interfaces, types, classes, structures, or files of the Program solely in each case in order to link to, bind by name, or subclass the Program or Modified Works thereof.
-
-“Distribute” means the acts of a) distributing or b) making available in any manner that enables the transfer of a copy.
-
-“Source Code” means the form of a Program preferred for making modifications, including but not limited to software source code, documentation source, and configuration files.
-
-“Secondary License” means either the GNU General Public License, Version 2.0, or any later versions of that license, including any exceptions or additional permissions as identified by the initial Contributor.
-2. GRANT OF RIGHTS
-
- a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, Distribute and sublicense the Contribution of such Contributor, if any, and such Derivative Works.
- b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in Source Code or other form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
- c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to Distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
- d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
- e) Notwithstanding the terms of any Secondary License, no Contributor makes additional grants to any Recipient (other than those set forth in this Agreement) as a result of such Recipient's receipt of the Program under the terms of a Secondary License (if permitted under the terms of Section 3).
-
-3. REQUIREMENTS
-
-3.1 If a Contributor Distributes the Program in any form, then:
-
- a) the Program must also be made available as Source Code, in accordance with section 3.2, and the Contributor must accompany the Program with a statement that the Source Code for the Program is available under this Agreement, and informs Recipients how to obtain it in a reasonable manner on or through a medium customarily used for software exchange; and
- b) the Contributor may Distribute the Program under a license different than this Agreement, provided that such license:
- i) effectively disclaims on behalf of all other Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
- ii) effectively excludes on behalf of all other Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
- iii) does not attempt to limit or alter the recipients' rights in the Source Code under section 3.2; and
- iv) requires any subsequent distribution of the Program by any party to be under a license that satisfies the requirements of this section 3.
-
-3.2 When the Program is Distributed as Source Code:
-
- a) it must be made available under this Agreement, or if the Program (i) is combined with other material in a separate file or files made available under a Secondary License, and (ii) the initial Contributor attached to the Source Code the notice described in Exhibit A of this Agreement, then the Program may be made available under the terms of such Secondary Licenses, and
- b) a copy of this Agreement must be included with each copy of the Program.
-
-3.3 Contributors may not remove or alter any copyright, patent, trademark, attribution notices, disclaimers of warranty, or limitations of liability (‘notices’) contained within the Program from any copy of the Program which they Distribute, provided that Contributors may add their own appropriate notices.
-4. COMMERCIAL DISTRIBUTION
-
-Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor (“Commercial Contributor”) hereby agrees to defend and indemnify every other Contributor (“Indemnified Contributor”) against any losses, damages and costs (collectively “Losses”) arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
-
-For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
-5. NO WARRANTY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
-6. DISCLAIMER OF LIABILITY
-
-EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-7. GENERAL
-
-If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
-
-If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
-
-All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
-
-Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be Distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to Distribute the Program (including its Contributions) under the new version.
-
-Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. Nothing in this Agreement is intended to be enforceable by any entity that is not a Contributor or Recipient. No third-party beneficiary rights are created under this Agreement.
-Exhibit A – Form of Secondary Licenses Notice
-
-“This Source Code may also be made available under the following Secondary Licenses when the conditions for such availability set forth in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), version(s), and exceptions or additional permissions here}.”
-
- Simply including a copy of this Agreement, including this Exhibit A is not sufficient to license the Source Code under Secondary Licenses.
-
- If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
-
- You may add additional accurate notices of copyright ownership.
-
diff --git a/src/alto/service/kafka_ale/licenses/jline-BSD-3-clause b/src/alto/service/kafka_ale/licenses/jline-BSD-3-clause
deleted file mode 100644
index 7e11b67fba770f5177946862f1a798577b3c2977..0000000000000000000000000000000000000000
--- a/src/alto/service/kafka_ale/licenses/jline-BSD-3-clause
+++ /dev/null
@@ -1,35 +0,0 @@
-Copyright (c) 2002-2018, the original author or authors.
-All rights reserved.
- -https://opensource.org/licenses/BSD-3-Clause - -Redistribution and use in source and binary forms, with or -without modification, are permitted provided that the following -conditions are met: - -Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with -the distribution. - -Neither the name of JLine nor the names of its contributors -may be used to endorse or promote products derived from this -software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, -BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, -OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED -AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/src/alto/service/kafka_ale/licenses/jopt-simple-MIT b/src/alto/service/kafka_ale/licenses/jopt-simple-MIT deleted file mode 100644 index 54b27325bb6b2b92043e23932329dc2ef107e291..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/jopt-simple-MIT +++ /dev/null @@ -1,24 +0,0 @@ -/* - The MIT License - - Copyright (c) 2004-2016 Paul R. Holser, Jr. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ diff --git a/src/alto/service/kafka_ale/licenses/paranamer-BSD-3-clause b/src/alto/service/kafka_ale/licenses/paranamer-BSD-3-clause deleted file mode 100644 index 9eab87918636a2933a9e780689d08839f515c898..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/paranamer-BSD-3-clause +++ /dev/null @@ -1,29 +0,0 @@ -[ ParaNamer used to be 'Pubic Domain', but since it includes a small piece of ASM it is now the same license as that: BSD ] - - Portions copyright (c) 2006-2018 Paul Hammant & ThoughtWorks Inc - Portions copyright (c) 2000-2007 INRIA, France Telecom - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/alto/service/kafka_ale/licenses/slf4j-MIT b/src/alto/service/kafka_ale/licenses/slf4j-MIT deleted file mode 100644 index 315bd4979f1558116a47bf33cea913c2b60c39f3..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/slf4j-MIT +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2004-2017 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - - diff --git a/src/alto/service/kafka_ale/licenses/zstd-jni-BSD-2-clause b/src/alto/service/kafka_ale/licenses/zstd-jni-BSD-2-clause deleted file mode 100644 index 66abb8ae782c862428611d37f6c646cf9cc50fca..0000000000000000000000000000000000000000 --- a/src/alto/service/kafka_ale/licenses/zstd-jni-BSD-2-clause +++ /dev/null @@ -1,26 +0,0 @@ -Zstd-jni: JNI bindings to Zstd Library - -Copyright (c) 2015-present, Luben Karavelov/ All rights reserved. - -BSD License - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/alto/service/kafka_ale/site-docs/kafka_2.13-3.3.1-site-docs.tgz b/src/alto/service/kafka_ale/site-docs/kafka_2.13-3.3.1-site-docs.tgz deleted file mode 100644 index 39c780f00c55b7d465a27130de0aa15039234ba1..0000000000000000000000000000000000000000 Binary files a/src/alto/service/kafka_ale/site-docs/kafka_2.13-3.3.1-site-docs.tgz and /dev/null differ diff --git a/src/alto/service/lanzadera.py b/src/alto/service/lanzadera.py deleted file mode 100644 index 81e6fd77ef929ab5f43a0e9f3217210cc6b0e42c..0000000000000000000000000000000000000000 --- a/src/alto/service/lanzadera.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 - -import sys -import threading -sys.path.append('cdn-alto/') -sys.path.append('alto-ale/') -from topology_maps_generator import TopologyCreator -from exponsure import ApiHttp - - -#Creation of ALTO modules -modules={} -modules['bgp'] = TopologyBGP(('localhost',8080)) -#modules['ietf'] = TopologyIetf(('localhost',8081)) -alto = TopologyCreator(modules, 0) -threads = list() -for modulo in modules.keys(): - print(modulo) - x = threading.Thread(target=alto.gestiona_info, args=(modulo,))#, daemon=True) - threads.append(x) - x.start() - -a = threading.Thread(target=alto.mailbox) -threads.append(a) -a.start() -app.run() - diff --git a/src/alto/service/modulos/alto_module.py b/src/alto/service/modulos/alto_module.py deleted file mode 100644 index 6f74d360232f8b2b800c17b435abd9393f5a9d37..0000000000000000000000000000000000000000 --- a/src/alto/service/modulos/alto_module.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import json -import re -import networkx -import socket -import struct -import hashlib -from abc import ABC, abstractmethod - -from time import sleep -from datetime import datetime -sys.path.append('cdn-alto/') 
-sys.path.append('alto-ale/') -from ipaddress import ip_address, IPv4Address - -DEFAULT_ASN = 0 -RR_BGP_0 = "50.50.50.1" -#RR_BGP = BGP_INFO['bgp']['ip'] - - -class AltoModule(ABC): - - def __init__(self, mb): - #self.props = {} - self.pids = {} - #self.topology = networkx.Graph() - #self.cost_map = {} - #self.router_ids = [] - self.ejes = {} - self.vtag = 0 - self.mailbox = mb - self.ts = {} - - ### Static Methods - - @staticmethod - def discard_message_from_protocol_id(message, discard_protocols): - """Discard message if protocol is inside discard_protocols list""" - return message["protocol-id"] in discard_protocols - - @staticmethod - def get_hex_id(ip): - """Get hexadecimal value for certain IP - :param: ip string""" - return ''.join(['%02x' % int(w) for w in ip.split('.')]) - - @staticmethod - def check_is_hex(hex_value): - try: - int(hex_value, 16) - return True - except ValueError: - return False - - @staticmethod - def split_router_ids(router_id: str): - """some router ids come without IP format. ie.e without dots in it - convert these router_ids to IPs""" - router_id = str(router_id) - if '.' in router_id: - return router_id - router_groups = re.findall('...', router_id) - no_zero_groups = [] - for group in router_groups: - if group.startswith('00'): - no_zero_groups.append(group[2:]) - elif group.startswith('0'): - no_zero_groups.append(group[1:]) - else: - no_zero_groups.append(group) - return '.'.join(no_zero_groups) - - @staticmethod - def check_if_router_id_is_hex(router_id): - return router_id.isnumeric() - - @staticmethod - def hex_to_ip(hex_ip): - hex_ip = hex_ip.strip("0") - addr_long = int(hex_ip, 16) & 0xFFFFFFFF - struct.pack("<L", addr_long) - return socket.inet_ntoa(struct.pack("<L", addr_long)) - - @staticmethod - def reverse_ip(reversed_ip): - l = reversed_ip.split(".") - return '.'.join(l[::-1]) - - - - ### Auxiliar methods - - def ip_type(self, prefix): - ip=prefix.split("/")[0] - return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6" - - def obtain_pid_sec(self, router, asn = 0): - """Returns the hashed PID of the router passed as argument. - If the PID was already mapped, it uses a dictionary to access to it. - """ - tsn = int(datetime.timestamp(datetime.now())*1000000) - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router + str(tsn)).encode()) - return ('pid%d:%s:%d' % (asn, hash_r.hexdigest()[:32], tsn)) - - def obtain_pid(self, router, asn = 0): - """Returns the hashed PID of the router passed as argument. - If the PID was already mapped, it uses a dictionary to access to it. - """ - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - return ('pid%d:%s' % (asn, rid)) - - - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. 
-        with AS number + domain_id + area_id + hexadecimal router_id
-        """
-        routers_id = []
-        desc = lsa[descriptors]
-        for item in desc:
-            if "router-id" in item:
-                routers_id.append(item["router-id"])
-        autonomous_systems = [item.get("autonomous-system") for item in desc]
-        domain_ids = [item.get("domain-id", 0) for item in desc]
-        for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids):
-            pid_name = 'pid%d:%s' % (autonomous_system, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id)
-            #pid_name = self.obtain_pid(router_id)
-            origin = (autonomous_system, domain_id, area_id, router_id)
-            if pid_name not in self.props:
-                self.props[pid_name] = []
-            self.props[pid_name].append(origin)
-
-    def _get_router_id(self, value):
-        if self.check_if_router_id_is_hex(value):
-            return self.split_router_ids(value)
-        elif "." in value:
-            return value
-        else:
-            return self.reverse_ip(self.hex_to_ip(value))
-
-    def _get_info_from_node_descript_list(self, node_descriptors, key: str, rid=''):
-        result = []
-        for descriptor in node_descriptors:
-            for key_d, value in descriptor.items():
-                if key_d == key:
-                    if key == "router-id":
-                        result.append(self._get_router_id(value))
-                        #print(value, key_d)
-                    elif key == 'autonomous-system':
-                        for des in node_descriptors:
-                            for kd, val in des.items():
-                                #print(kd,val)
-                                if kd == "router-id":
-                                    return value  # NOTE: returns the AS as a scalar here, unlike the list returned below
-        return result
-
-    def parseo_yang(self, mensaje, tipo):
-        return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}'
-
-
-
-    ### Topology generation and information gathering functions
-
-    def load_topology(self, lsa, igp_metric):
-        if lsa.get('ls-nlri-type') == 'bgpls-link':
-            # Link information
-            src = self._get_info_from_node_descript_list(lsa['local-node-descriptors'], 'router-id')
-            dst = self._get_info_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id')
-            for i, j in zip(src, dst):
-                self.topology.add_edge(i, j, weight=igp_metric)
-        if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4':
-            # ToDo verify if prefix info is needed and not already provided by node-descriptors
-            # Node information.
Groups origin with its prefixes - origin = self._get_info_from_node_descript_list(lsa['node-descriptors'], "router-id") - prefix = self.split_router_ids(lsa['ip-reach-prefix']) - for item in origin: - if item not in self.topology.nodes(): - self.topology.add_node(item) - if 'prefixes' not in self.topology.nodes[item]: - self.topology.nodes[item]['prefixes'] = [] - self.topology.nodes[item]['prefixes'].append(prefix) - if lsa.get('ls-nlri-type') == "bgpls-node": - # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4 - # add node to topology if not present - node_descriptors = self._get_info_from_node_descript_list(lsa['node-descriptors'], 'router-id') - self.router_ids.append(node_descriptors) - for node_descriptor in node_descriptors: - if node_descriptor not in self.topology.nodes(): - self.topology.add_node(node_descriptor) - - def load_pid_prop(self, lsa, ls_area_id): - if 'node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id) - if 'local-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id) - if 'remote-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id) - - def load_pids(self, ipv4db): - # self.pids stores the result of networkmap - for rr_bgp in [RR_BGP_0]: - for prefix, data in ipv4db[rr_bgp]['ipv4'].items(): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop'])) - #pid_name = self.obtain_pid(data['next-hop']) - tipo=self.ip_type(prefix) - if pid_name not in self.pids: - self.pids[pid_name] = {} - if tipo not in self.pids[pid_name]: - self.pids[pid_name][tipo]=[] - if prefix not in self.pids[pid_name][tipo]: - self.pids[pid_name][tipo].append(prefix) - - def compute_costmap(self): - # shortest_paths is a dict by source and target that contains the shortest path length for - # that source and destination - shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology)) - for src, dest_pids in shortest_paths.items(): - src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src)) - #src_pid_name = self.obtain_pid(src) - for dest_pid, weight in dest_pids.items(): - dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid)) - #dst_pid_name = self.obtain_pid(dest_pid) - if src_pid_name not in self.cost_map: - self.cost_map[src_pid_name] = {} - self.cost_map[src_pid_name][dst_pid_name] = weight - - def return_info(self, src, tipo, costs, msn): - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - meta = '{"source":'+str(src)+', "action":'+str(tipo)+', "costs":'+str(costs)+"}" - msg = '{"meta":' + str(meta) +', "data":' + str(msn) + "}" - msg = msg.replace("(", '"(') - msg = msg.replace(")", ')"') - print("Sending data to: " + str(self.mailbox)) - s.sendto(msg.encode(), self.mailbox) - - ### Manager function - @abstractmethod - def manage_topology_updates(self): - pass diff --git a/src/alto/service/modulos/ietf2_prueba.json b/src/alto/service/modulos/ietf2_prueba.json deleted file mode 100644 index 154266906b5138fed966c494d5e931283e08a955..0000000000000000000000000000000000000000 --- a/src/alto/service/modulos/ietf2_prueba.json +++ /dev/null @@ -1,1952 +0,0 @@ -{ - "ietf-network:networks": { - "network": [ - { - "network-id": "0 : 0 : 0", - "network-types": { - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "ietf-network-topology:link": [ - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/0 - ATN950C-2_HL5-3-1 
GigabitEthernet0/2/4" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/10 - NE40X2-1_HL4-2-1 50|100GE0/1/0" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/2 - NE40X8-3_HL4 GigabitEthernet4/0/1" - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/8 - ATN950C-1_HL5-1-1 Ethernet0/0/0" - }, - { - "link-id": "ATN950C-1_HL5-1-1 Ethernet0/0/0 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/8" - }, - { - "link-id": "ATN950C-2_HL5-3-1 GigabitEthernet0/2/4 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/0" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/0 - HL2-2-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/1 - HL2-3-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/2 - HL2-1-2 ge-0/0/2" - }, - { - "link-id": "HL2-1-2 ge-0/0/0 - HL2-2-2 ge-0/0/0" - }, - { - "link-id": "HL2-1-2 ge-0/0/1 - HL2-3-2 ge-0/0/1" - }, - { - "link-id": "HL2-1-2 ge-0/0/2 - HL2-1-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/0 - HL2-1-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/1 - HL2-4-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/2 - HL2-2-2 ge-0/0/2" - }, - { - "link-id": "HL2-2-2 ge-0/0/0 - HL2-1-2 ge-0/0/0" - }, - { - "link-id": "HL2-2-2 ge-0/0/1 - HL2-4-2 ge-0/0/1" - }, - { - "link-id": "HL2-2-2 ge-0/0/2 - HL2-2-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/0 - HL2-4-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/1 - HL2-1-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/2 - HL2-3-2 ge-0/0/2" - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/3 - NE40X8-1_HL3 GigabitEthernet4/0/2" - }, - { - "link-id": "HL2-3-2 ge-0/0/0 - HL2-4-2 ge-0/0/0" - }, - { - "link-id": "HL2-3-2 ge-0/0/1 - HL2-1-2 ge-0/0/1" - }, - { - "link-id": "HL2-3-2 ge-0/0/2 - HL2-3-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/0 - HL2-3-1 GigabitEthernet0/0/0/0" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/1 - HL2-2-1 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/2 - HL2-4-2 ge-0/0/2" - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/3 - HL3-2-2 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-2 ge-0/0/0 - HL2-3-2 ge-0/0/0" - }, - { - "link-id": "HL2-4-2 ge-0/0/1 - HL2-2-2 ge-0/0/1" - }, - { - "link-id": "HL2-4-2 ge-0/0/2 - HL2-4-1 GigabitEthernet0/0/0/2" - }, - { - "link-id": "HL2-4-2 ge-0/0/4 - HL3-2-2 GigabitEthernet0/0/0/1" - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/1 - HL2-4-2 ge-0/0/4" - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/2 - HL2-4-1 GigabitEthernet0/0/0/3" - }, - { - "link-id": "NE40X2-1_HL4-2-1 50|100GE0/1/0 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/10" - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/1 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/2" - } - ] - }, - { - "network-id": "0 : 0 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "1.1.1.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-1", - "router-id": ["1.1.1.3"], - "prefix": [ - { - "prefix": "1.1.1.3/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - 
"node-id": "3.3.3.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-1_HL3", - "router-id": ["3.3.3.1"], - "prefix": [ - { - "prefix": "3.3.3.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.43.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.35.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/2.52", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.52.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "3.3.3.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-2_HL3", - "router-id": ["3.3.3.2"], - "prefix": [ - { - "prefix": "3.3.3.2/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.35.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X8-3_HL4", - "router-id": ["4.4.4.1"], - "prefix": [ - { - "prefix": "4.4.4.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.43.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/1.56", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.56.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet4/0/6.41", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.41.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "NE40X2-1_HL4-2-1", - "router-id": ["4.4.4.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - 
"system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "50|100GE0/1/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL5-1-2", - "router-id": ["5.5.5.1"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "5.5.5.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN950C-1_HL5-1-1", - "router-id": ["5.5.5.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "Ethernet0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/1/0.56", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.56.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "ATN910C-2_HL5-2-1", - "router-id": ["5.5.5.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/2/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/10", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/5.54", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.5.54.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/7.41", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.4.41.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/8", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - 
"name": "ATN950C-2_HL5-3-1", - "router-id": ["5.5.5.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/1/0.54", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.5.54.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/2/4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "5.5.5.5", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL5-2-2", - "router-id": ["5.5.5.5"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - }, - { - "node-id": "5.5.5.6", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "AS7315-30X", - "router-id": ["5.5.5.6"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - } - } - ], - "ietf-network-topology:link": [ - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/5.54 - ATN950C-2_HL5-3-1 GigabitEthernet0/1/0.54", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN910C-2_HL5-2-1 GigabitEthernet0/2/7.41 - NE40X8-3_HL4 GigabitEthernet4/0/6.41", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "30", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN950C-1_HL5-1-1 GigabitEthernet0/1/0.56 - NE40X8-3_HL4 GigabitEthernet4/0/1.56", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "-1", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "ATN950C-2_HL5-3-1 GigabitEthernet0/1/0.54 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/5.54", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/0.43 - NE40X8-3_HL4 GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/1.35 - NE40X8-2_HL3 GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-1_HL3 GigabitEthernet4/0/2.52 - HL2-3-1 GigabitEthernet0/0/0/3.52", - "ietf-l3-unicast-topology:l3-link-attributes": { - 
"metric1": "-1", - "metric2": "-1", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-2_HL3 GigabitEthernet4/0/1.35 - NE40X8-1_HL3 GigabitEthernet4/0/1.35", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/0.43 - NE40X8-1_HL3 GigabitEthernet4/0/0.43", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/1.56 - ATN950C-1_HL5-1-1 GigabitEthernet0/1/0.56", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "NE40X8-3_HL4 GigabitEthernet4/0/6.41 - ATN910C-2_HL5-2-1 GigabitEthernet0/2/7.41", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "0", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "0 : 1 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "3.3.3.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "7750SR-7_3", - "router-id": ["3.3.3.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "to_7750SR7_HL4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.43.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "to_HL2-3-2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.50.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "4.4.4.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "7750SR-7_4", - "router-id": ["4.4.4.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "to_7750SR7_HL3", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.43.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "7750SR-7_3 to_7750SR7_HL4 - 7750SR-7_4 to_7750SR7_HL3", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "30", - "metric2": "10", - "tefsdn-topology:domain-id": "1", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "7750SR-7_4 to_7750SR7_HL3 - 7750SR-7_3 to_7750SR7_HL4", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - 
"tefsdn-topology:domain-id": "1", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "0 : 1111 : 0 ISIS", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "1.1.1.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-1-1", - "router-id": ["1.1.1.1"], - "prefix": [ - { - "prefix": "1.1.1.1/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.2.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.3.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.11", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.11.1"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-2-1", - "router-id": ["1.1.1.2"], - "prefix": [ - { - "prefix": "1.1.1.2/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.2.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.4.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - 
"ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.22", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.22.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-1", - "router-id": ["1.1.1.3"], - "prefix": [ - { - "prefix": "1.1.1.3/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.4.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.3.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.33", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.33.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/3.52", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["98.3.52.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "1.1.1.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-4-1", - "router-id": ["1.1.1.4"], - "prefix": [ - { - "prefix": "1.1.1.4/32" - } - ] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.4.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - 
"level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.4.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2.44", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.4.44.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/3", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.1", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-1-2", - "router-id": ["2.2.2.1"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.12.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.13.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.11", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.11.11"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.2", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-2-2", - "router-id": ["2.2.2.2"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.12.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - 
"ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.24.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.22", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.22.22"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.3", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-3-2", - "router-id": ["2.2.2.3"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.34.3"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.1.13.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.33", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.33.33"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4.50", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.50.2"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "2.2.2.4", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL2-4-2", - "router-id": ["2.2.2.4"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "ge-0/0/0", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.34.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1", - 
"ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.2.24.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/2.44", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.4.44.44"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "ge-0/0/4.51", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.51.4"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - }, - { - "node-id": "3.3.3.5", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "HL3-2-2", - "router-id": ["3.3.3.5"] - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/1", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/1.51", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ip-address": ["99.3.51.5"], - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - }, - { - "tp-id": "GigabitEthernet0/0/0/2", - "ietf-l3-unicast-topology:l3-termination-point-attributes": { - "ietf-l3-isis-topology:isis-termination-point-attributes": { - "level": "level-2" - } - } - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "7750SR-7_3 to_HL2-3-2 - HL2-3-2 ge-0/0/4.50", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/0.12 - HL2-2-1 GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/1.13 - HL2-3-1 GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-1 GigabitEthernet0/0/0/2.11 - HL2-1-2 ge-0/0/2.11", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-2 ge-0/0/0.12 - HL2-2-2 ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - 
"metric1": "40", - "metric2": "12", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-1-2 ge-0/0/1.13 - HL2-3-2 ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "20", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/0.12 - HL2-1-1 GigabitEthernet0/0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/1.24 - HL2-4-1 GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-1 GigabitEthernet0/0/0/2.22 - HL2-2-2 ge-0/0/2.22", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "30", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/0.12 - HL2-1-2 ge-0/0/0.12", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "40", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/1.24 - HL2-4-2 ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-2-2 ge-0/0/2.22 - HL2-2-1 GigabitEthernet0/0/0/2.22", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/0.34 - HL2-4-1 GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/1.13 - HL2-1-1 GigabitEthernet0/0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/2.33 - HL2-3-2 ge-0/0/2.33", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-1 GigabitEthernet0/0/0/3.52 - NE40X8-1_HL3 GigabitEthernet4/0/2.52", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/0.34 - HL2-4-2 ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/1.13 - HL2-1-2 ge-0/0/1.13", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - 
"metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/2.33 - HL2-3-1 GigabitEthernet0/0/0/2.33", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-3-2 ge-0/0/4.50 - 7750SR-7_3 to_HL2-3-2", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/0.34 - HL2-3-1 GigabitEthernet0/0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/1.24 - HL2-2-1 GigabitEthernet0/0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-1 GigabitEthernet0/0/0/2.44 - HL2-4-2 ge-0/0/2.44", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/0.34 - HL2-3-2 ge-0/0/0.34", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/1.24 - HL2-2-2 ge-0/0/1.24", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/2.44 - HL2-4-1 GigabitEthernet0/0/0/2.44", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL2-4-2 ge-0/0/4.51 - HL3-2-2 GigabitEthernet0/0/0/1.51", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - }, - { - "link-id": "HL3-2-2 GigabitEthernet0/0/0/1.51 - HL2-4-2 ge-0/0/4.51", - "ietf-l3-unicast-topology:l3-link-attributes": { - "metric1": "10", - "metric2": "10", - "tefsdn-topology:domain-id": "1111", - "tefsdn-topology:link-attributes": { - "level": "2" - } - } - } - ] - }, - { - "network-id": "TEST_Cisco", - "network-types": { - "ietf-l3-isis-topology:isis-topology": { - }, - "ietf-l3-unicast-topology:l3-unicast-topology": { - } - }, - "node": [ - { - "node-id": "['4.4.4.1']", - "ietf-l3-unicast-topology:l3-node-attributes": { - "name": "Cisco-R3" - }, - "ietf-ne-commissioning:commissioning-configs": { - "system-config": { - "openconfig-system:system": { - "ssh-server": { - "state": { - "enable": true, - "protocol-version": "V2" - } - }, - "telnet-server": { - "state": { - "enable": false - } - } - } - } - }, - "ietf-network-topology:termination-point": [ - { - "tp-id": "GigabitEthernet0/0/0/0" - }, - { - "tp-id": "GigabitEthernet0/0/0/1" - }, - { - "tp-id": "GigabitEthernet0/0/0/2" - 
}, - { - "tp-id": "GigabitEthernet0/0/0/3" - }, - { - "tp-id": "GigabitEthernet0/0/0/4" - }, - { - "tp-id": "GigabitEthernet0/0/0/5" - }, - { - "tp-id": "GigabitEthernet0/0/0/6" - }, - { - "tp-id": "Loopback0" - }, - { - "tp-id": "Loopback76" - }, - { - "tp-id": "MgmtEth0/RP0/CPU0/0" - } - ] - } - ] - } - ] - } -} diff --git a/src/alto/service/modulos/topology_bgp.py b/src/alto/service/modulos/topology_bgp.py deleted file mode 100644 index 85e57756d932d72887987cdc54e5e8025c0ee675..0000000000000000000000000000000000000000 --- a/src/alto/service/modulos/topology_bgp.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -import json -import re -import networkx -import socket -import struct -import hashlib - -from time import sleep -from datetime import datetime -sys.path.append('cdn-alto/') -from bgp.manage_bgp_speaker import ManageBGPSpeaker -sys.path.append('alto-ale/') -from ipaddress import ip_address, IPv4Address -from modulos.alto_module import AltoModule - -DEFAULT_ASN = 0 -RR_BGP_0 = "50.50.50.1" -#RR_BGP = BGP_INFO['bgp']['ip'] - - -class TopologyBGP(AltoModule): - - def __init__(self, mb): - super().__init__(mb) - self.exabgp_process = ManageBGPSpeaker().check_tcp_connection() - '''self.props = {} - self.pids = {} - self.topology = networkx.Graph() - self.cost_map = {} - self.router_ids = [] - self.vtag = 0 - self.mailbox = mb - - ### Static Methods - - @staticmethod - def discard_message_from_protocol_id(message, discard_protocols): - """Discard message if protocol is inside discard_protocols list""" - return message["protocol-id"] in discard_protocols - - @staticmethod - def get_hex_id(ip): - """Get hexadecimal value for certain IP - :param: ip string""" - return ''.join(['%02x' % int(w) for w in ip.split('.')]) - - @staticmethod - def check_is_hex(hex_value): - try: - int(hex_value, 16) - return True - except ValueError: - return False - - @staticmethod - def split_router_ids(router_id: str): - """some router ids come without IP format. ie.e without dots in it - convert these router_ids to IPs""" - router_id = str(router_id) - if '.' in router_id: - return router_id - router_groups = re.findall('...', router_id) - no_zero_groups = [] - for group in router_groups: - if group.startswith('00'): - no_zero_groups.append(group[2:]) - elif group.startswith('0'): - no_zero_groups.append(group[1:]) - else: - no_zero_groups.append(group) - return '.'.join(no_zero_groups) - - @staticmethod - def check_if_router_id_is_hex(router_id): - return router_id.isnumeric() - - @staticmethod - def hex_to_ip(hex_ip): - hex_ip = hex_ip.strip("0") - addr_long = int(hex_ip, 16) & 0xFFFFFFFF - struct.pack("<L", addr_long) - return socket.inet_ntoa(struct.pack("<L", addr_long)) - - @staticmethod - def reverse_ip(reversed_ip): - l = reversed_ip.split(".") - return '.'.join(l[::-1]) - - - - ### Auxiliar methods - - def ip_type(self, prefix): - ip=prefix.split("/")[0] - return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6" - - def obtain_pid(self, router): - """Returns the hashed PID of the router passed as argument. - If the PID was already mapped, it uses a dictionary to access to it. 
- """ - tsn = int(datetime.timestamp(datetime.now())*1000000) - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router + str(tsn)).encode()) - return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn)) - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. - with AS number + domain_id + area_id + hexadecimal router_id - """ - routers_id = [] - desc = lsa[descriptors] - for item in desc: - if "router-id" in item: - routers_id.append(item["router-id"]) - autonomous_systems = [item.get("autonomous-system") for item in desc] - domain_ids = [item.get("domain-id", 0) for item in desc] - for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id) - #pid_name = self.obtain_pid(router_id) - origin = (autonomous_system, domain_id, area_id, router_id) - if pid_name not in self.props: - self.props[pid_name] = [] - self.props[pid_name].append(origin) - - def _get_router_id_from_node_descript_list(self, node_descriptors, key: str): - result = [] - for descriptor in node_descriptors: - for key_d, value in descriptor.items(): - if key_d == key: - #print(value, key_d) - if self.check_if_router_id_is_hex(value): - result.append(self.split_router_ids(value)) - elif "." in value: - result.append(value) - else: - result.append(self.reverse_ip(self.hex_to_ip(value))) - return result - - - ### Topology generation and information recopilation functions - - def load_topology(self, lsa, igp_metric): - if lsa.get('ls-nlri-type') == 'bgpls-link': - # Link information - src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id') - dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id') - for i, j in zip(src, dst): - self.topology.add_edge(i, j, weight=igp_metric) - if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4': - # ToDo verify if prefix info is needed and not already provided by node-descriptors - # Node information. 
Groups origin with its prefixes
-            origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id")
-            prefix = self.split_router_ids(lsa['ip-reach-prefix'])
-            for item in origin:
-                if item not in self.topology.nodes():
-                    self.topology.add_node(item)
-                if 'prefixes' not in self.topology.nodes[item]:
-                    self.topology.nodes[item]['prefixes'] = []
-                self.topology.nodes[item]['prefixes'].append(prefix)
-        if lsa.get('ls-nlri-type') == "bgpls-node":
-            # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4
-            # add node to topology if not present
-            node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id')
-            self.router_ids.append(node_descriptors)
-            for node_descriptor in node_descriptors:
-                if node_descriptor not in self.topology.nodes():
-                    self.topology.add_node(node_descriptor)
-
-    def load_pid_prop(self, lsa, ls_area_id):
-        if 'node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id)
-        if 'local-node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id)
-        if 'remote-node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id)
-
-    def load_pids(self, ipv4db):
-        # self.pids stores the result of networkmap
-        for rr_bgp in [RR_BGP_0]:
-            for prefix, data in ipv4db[rr_bgp]['ipv4'].items():
-                pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop']))
-                #pid_name = self.obtain_pid(data['next-hop'])
-                tipo=self.ip_type(prefix)
-                if pid_name not in self.pids:
-                    self.pids[pid_name] = {}
-                if tipo not in self.pids[pid_name]:
-                    self.pids[pid_name][tipo]=[]
-                if prefix not in self.pids[pid_name][tipo]:
-                    self.pids[pid_name][tipo].append(prefix)
-
-    def return_info(self, proto, msn):
-        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        msg = "{'src':" + str(proto) +", 'map':" + str(msn) + "}"
-        s.sendto(msg.encode(), self.mailbox)
-'''
-
-    ### Topology generation and information gathering functions
-    def load_topology(self, lsa, igp_metric):
-        if lsa.get('ls-nlri-type') == 'bgpls-link':
-            # Link information
-            src = self._get_info_from_node_descript_list(lsa['local-node-descriptors'], 'router-id')
-            dst = self._get_info_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id')
-            for i, j in zip(src, dst):
-                self.ejes.append((i, j, igp_metric))  # NOTE: AltoModule.__init__ sets self.ejes to a dict, so this list-style append would fail
-        if lsa.get('ls-nlri-type') == "bgpls-node":
-            # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4
-            # add node to topology if not present
-            node_descriptors = self._get_info_from_node_descript_list(lsa['node-descriptors'], 'router-id')
-            for nd in node_descriptors:
-                if nd not in self.pids.values():
-                    auts = self._get_info_from_node_descript_list(lsa['node-descriptors'], 'autonomous-system', nd)
-                    if auts == []:
-                        print("Big fail: no autonomous-system found for " + str(nd))
-                        auts = 0
-                    pid = self.obtain_pid(nd, auts)
-                    self.pids[pid] = nd
-
-
-
-    ### Manager function
-
-    def manage_topology_updates(self):
-        """
-        Reads the stdout of the exabgp process,
        reading it line by line.
-        Decoded update messages from exabgp are used to build the network map and cost map
-        :return:
-        """
-        pids_to_load = {RR_BGP_0: {'ipv4': {}}}
-        while True:
-            line = self.exabgp_process.stdout.readline().strip()
-            tipo = -1
-            if b'decoded UPDATE' in line and b'json' in line:
-                #print(line)
-                self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64]
-                decode_line = json.loads(line.split(b'json')[1])
-                neighbor_ip_address = decode_line['neighbor']['address']['peer']
-                update_msg = decode_line['neighbor']['message']['update']
-                if 'announce' in update_msg:
-                    is_bgp_ls = update_msg['announce'].get('bgp-ls bgp-ls')
-                    is_bgp = update_msg['announce'].get('ipv4 unicast')
-                    if 'attribute' in update_msg:
-                        ls_area_id = update_msg['attribute'].get('bgp-ls', {}).get('area-id', 0)
-                        igp_metric = update_msg['attribute'].get('bgp-ls', {}).get("igp-metric", 1)
-                    if is_bgp_ls:
-                        for next_hop_address, nlri in is_bgp_ls.items():
-                            for prefix in nlri:
-                                if self.discard_message_from_protocol_id(prefix, [4, 5]):
-                                    continue
-                                #print("hello load")
-                                self.load_topology(prefix, igp_metric)
-                                #self.load_pid_prop(prefix, ls_area_id)
-                    elif is_bgp:
-                        for next_hop, prefix in is_bgp.items():
-                            for nlri in prefix:
-                                #print("hello pid" + str(pids_to_load))
-                                #pid = self.obtain_pid(nd, auts)
-                                #self.pids[pid] = nd
-                                pids_to_load[neighbor_ip_address]['ipv4'][nlri['nlri']] = {'next-hop': next_hop}
-
-                    tipo = 1
-                elif 'withdraw' in update_msg and 'bgp-ls bgp-ls' in update_msg['withdraw']:
-                    for route in update_msg['withdraw']['bgp-ls bgp-ls']:
-                        u=0;v=0
-                        for field, values in route.items():
-                            if field == "local-node-descriptors":
-                                for n in values:
-                                    for i, j in n.items():
-                                        if i == "router-id":
-                                            u=j
-                            elif field == "remote-node-descriptors":
-                                for n in values:
-                                    for i, j in n.items():
-                                        if i == "router-id":
-                                            v=j
-                        if u != 0 and v != 0:
-                            try:
-                                self.topology.remove_edge(self.split_router_ids(u), self.split_router_ids(v))
-                            except:
-                                print("Edge already removed.")
-                    tipo = 0
-            #self.compute_costmap()
-            # Here we should periodically send the info to the master ALTO.
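# [Editor's note -- illustrative only, not part of the patch] The loop above pulls
# fields out of exabgp's "decoded UPDATE ... json ..." log lines. Inferred from
# those accesses, one decoded message is shaped roughly like this (values are
# made up; the real keys come from exabgp's JSON encoder):
#
#     {"neighbor": {"address": {"peer": "50.50.50.1"},
#                   "message": {"update": {
#                       "attribute": {"bgp-ls": {"area-id": 0, "igp-metric": 10}},
#                       "announce": {
#                           "bgp-ls bgp-ls": {"50.50.50.1": [
#                               {"ls-nlri-type": "bgpls-link", "protocol-id": 2,
#                                "local-node-descriptors":  [{"router-id": "1.1.1.1"}],
#                                "remote-node-descriptors": [{"router-id": "1.1.1.2"}]}]},
#                           "ipv4 unicast": {"50.50.50.1": [{"nlri": "10.0.0.0/24"}]}}}}}}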
-                datos = str(self.pids).replace("'", '"')
-                data = '{"pids":' + datos + ',"costs-list": ' + str(self.ejes) + "}"
-                #print(str(data))
-                self.return_info(0, tipo, 1, data)
diff --git a/src/alto/service/modulos/topology_ietf.py b/src/alto/service/modulos/topology_ietf.py
deleted file mode 100644
index de25fee8ba94fbf4f99e8def55bc9d81d3c9256d..0000000000000000000000000000000000000000
--- a/src/alto/service/modulos/topology_ietf.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import json
-import networkx
-import hashlib
-
-from time import sleep
-from datetime import datetime
-#sys.path.append('cdn-alto/')
-#sys.path.append('alto-ale/')
-from ipaddress import ip_address
-from modulos.alto_module import AltoModule
-
-
-DEFAULT_ASN = 1
-RR_BGP_0 = "50.50.50.1"
-#RR_BGP = BGP_INFO['bgp']['ip']
-MAX_VAL = 16777214
-
-class TopologyIetf(AltoModule):
-
-    def __init__(self, mb):
-        super().__init__(mb)
-        ''' self.ietf_process = 0
-        self.props = {}
-        self.pids = {}'''
-        self.topology = networkx.Graph()
-        self.cost_map = {}
-        self.router_ids = []
-        self.ts = {}
-
-
-    ### Manager function
-    def manage_topology_updates(self):
-        while 1:
-            #sleep(15)
-            sleep(5)
-            self.manage_updates()
-
-
-    def manage_updates(self):
-        '''
-        Receives topology information from the PCE via the Southbound Interface and creates/updates the graphs.
-        Performs an iterative analysis, reviewing each network: if two networks are the same but come from different protocols, they must be merged.
-        Three attributes on each network: dic[ips], dic[interfaces] and graph[links]
-        '''
-        # Dictionary node-id:name
-        nodos = {}
-        # Dictionary node-id:[(interface, ip)]
-        tps = {}
-        # List of links
-        links = []
-        full_path = os.path.join("./", "ietf2_prueba.json")
-        with open(full_path, 'r') as archivo:
-            self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64]
-            #while True:
-            deluro = archivo.read()
-            d_json = json.loads(str(deluro))
-            #print("Type = " + str(type(d_json)) + "\nMessage = " + str(d_json))
-            ietf_networks = d_json["ietf-network:networks"]
-            if ietf_networks == '':
-                return
-            # Build a dictionary with all existing networks and iterate over it looking for the valid ones
-            for net in ietf_networks["network"]:
-                if "node" in net.keys() and "ietf-network-topology:link" in net.keys():
-                    for nodo in net["node"]:
-                        # Match the node IDs with the name and the prefix(es).
-                        nodos[nodo["node-id"]] = nodo["ietf-l3-unicast-topology:l3-node-attributes"]["name"]
-                        tps[nodo["node-id"]] = []
-                        if "ietf-network-topology:termination-point" in nodo.keys():
-                            for tp in nodo["ietf-network-topology:termination-point"]:
-                                tps[nodo["node-id"]].append(str(nodos[nodo["node-id"]]) + ' ' + str(tp["tp-id"]))
-                        pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(nodo["node-id"]))
-                        if pid_name not in self.pids:
-                            self.pids[pid_name] = {}
-                        if 'ipv4' not in self.pids[pid_name]:
-                            self.pids[pid_name]['ipv4'] = []
-                        if nodo['node-id'] not in self.pids[pid_name]['ipv4']:
-                            self.pids[pid_name]['ipv4'].append(nodo['node-id'])
-                        self.topology.add_node(nodo['node-id'])
-
-                    # Still to do: list the links and store them.
-                    for link in net["ietf-network-topology:link"]:
-                        a, b = link["link-id"].split(" - ")
-                        if a == '' or b == '':
-                            break
-                        a1 = a.split(' ')[0]
-                        b1 = b.split(' ')[0]
-                        for k in nodos.keys():
-                            if nodos[k] == a1:
-                                a = k
-                            elif nodos[k] == b1:
-                                b = k
-                        links.append(((a, b), link["ietf-l3-unicast-topology:l3-link-attributes"]["metric1"]))
-
-            # Once everything works, store it in a graph instead of in dictionaries. -> The nodes can already be added above.
-            # Right now everything works; what remains is to convert a, b to PIDs instead of node-ids.
-            for link in links:
-                if int(link[1]) >= 0:
-                    self.topology.add_edge(link[0][0], link[0][1], weight=int(link[1]))
-                    self.ejes[(link[0][0], link[0][1])] = int(link[1])
-                    #print("Hello World")
-                    #self.ejes.append((link[0][0], link[0][1], int(link[1])))
-            # We need to review which dictionaries are still required.
-            # Since BGP represents it as node-id - node-id, it may be important to unify the displayed representation. (done)
-            # What do we do with the interfaces? Show them on the edges, or is that unnecessary? Keep a list of links showing how they connect?
-            self.compute_costmap()
-            datos = str(self.pids).replace("'", '"')
-            nodos = list(set(self.topology.nodes()))
-            snodos = str(nodos).replace("'", '"')
-
-            #print(self.ejes.keys)
-            z_ejes = [(tupla[0], tupla[1], self.ejes[tupla]) for tupla in self.ejes]
-            #print(str(z_ejes))
-            data = '{"pids":' + datos + ',"nodes-list": ' + snodos + ',"costs-list": ' + str(z_ejes) + "}"
-            self.return_info(2, 0, 1, data)
-
-
diff --git a/src/alto/service/modulos/topology_maps_generator.py b/src/alto/service/modulos/topology_maps_generator.py
deleted file mode 100644
index 9155aa94f2e10f92053de600ce5aba615b30228c..0000000000000000000000000000000000000000
--- a/src/alto/service/modulos/topology_maps_generator.py
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-import json
-import re
-import networkx
-import socket
-import struct
-import hashlib
-import threading
-import flask
-
-from time import sleep
-from datetime import datetime
-sys.path.append('cdn-alto/')
-sys.path.append('alto-ale/')
-from kafka_ale.kafka_api import AltoProducer
-#from api_pybatfish import BatfishManager
-from yang_alto import RespuestasAlto
-from ipaddress import ip_address, IPv4Address
-from modulos.topology_bgp import TopologyBGP
-from modulos.topology_ietf import TopologyIetf
-
-DEFAULT_ASN = 0
-RR_BGP_0 = "50.50.50.1"
-#RR_BGP = BGP_INFO['bgp']['ip']
-
-
-
-# API section
-app = flask.Flask(__name__)
-app.config["DEBUG"] = True
-
-@app.route('/', methods=['GET'])
-def home():
-    return '''
-    <h1>TEST ACCESS API FOR THE ALTO SERVICE</h1>
-    <h2>Available services:</h2>
-    <p><ul>
-        <li>All disjoint paths between A and B: <b><tt> /all/<string:a>/<string:b> </b></tt></li>
-        <li>Shortest path between A and B: <b><tt> /best/<string:a>/<string:b> </b></tt></li>
-        <li>Cost map: /costs </li>
-        <li>PID map: /pids </li>
-    </ul></p>
-    '''
-
-###################################
-##                               ##
-#   Services defined in RFC 7285  #
-##                               ##
-###################################
-
-# Map-Filtering Service
-@app.route('/costmap/filter/<string:pid>', methods=['GET'])
-def api_costs_by_pid(pid):
-    return flask.jsonify(alto.get_costs_map_by_pid(pid))
-
-# Endpoint Property Service
-@app.route('/properties/<string:pid>', methods=['GET'])
-def api_properties(pid):
-    return flask.jsonify(alto.get_properties(pid))
-
-# Map Service
-@app.route('/maps', methods=['GET'])
-def api_maps():
-    return flask.jsonify(alto.get_maps())
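As a usage illustration for the REST endpoints being removed in this file, a minimal client sketch follows. It assumes the Flask app is running on its default localhost:5000, which matches the URIs advertised by indice() in yang_alto.py further down; the PID value passed to the filter endpoint is a made-up example.

import json
import urllib.request

BASE = "http://localhost:5000"  # Flask default port, matching the directory URIs

def fetch(path):
    # GET an ALTO resource and decode the JSON body
    with urllib.request.urlopen(BASE + path) as resp:
        return json.loads(resp.read().decode())

maps = fetch("/maps")                              # Map Service
filtered = fetch("/costmap/filter/pid0:0a000001")  # Map-Filtering Service, hypothetical PID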
-
-# Network Map Service
-@app.route('/costmap', methods=['GET'])
-def api_costs():
-    return flask.jsonify(alto.get_costs_map())
-
-@app.route('/networkmap', methods=['GET'])
-def api_pids():
-    return flask.jsonify(alto.get_pids())
-
-@app.route('/directory', methods=['GET'])
-def api_directory():
-    return flask.jsonify(alto.get_directory())
-
-
-###################################
-##                               ##
-#           Extensions            #
-##                               ##
-###################################
-
-
-# All possible paths between A and B without any common node
-@app.route('/all/<string:a>/<string:b>', methods=['GET'])
-def api_all(a, b):
-    return flask.jsonify(alto.parseo_yang(str(alto.all_maps(alto.topology, a, b)), "all-paths"))
-
-# Best path between A and B
-@app.route('/best/<string:a>/<string:b>', methods=['GET'])
-def api_shortest(a, b):
-    return flask.jsonify(str(shortest_path(a, b)))
-
-
-class TopologyCreator:
-
-    def __init__(self, modules, mode):
-        self.d_modules = modules
-        self.props = {}
-        self.pids = {}
-        self.topology = networkx.Graph()
-        self.cost_map = {}
-        self.router_ids = []
-        # set path where to write result json files
-        self.topology_writer = TopologyFileWriter('/root/')
-        if mode:
-            self.kafka_p = AltoProducer("localhost", "9092")
-            #self.kafka_p = AltoProducer("localhost", "9093")
-        self.ts = {}
-        #self.bfm = BatfishManager()
-        self.vtag = 0
-        self.resp = RespuestasAlto()
-        #self.hilos = self.lanzadera()
-
-    ### Static Methods
-
-    @staticmethod
-    def discard_message_from_protocol_id(message, discard_protocols):
-        """Discard message if protocol is inside discard_protocols list"""
-        return message["protocol-id"] in discard_protocols
-
-    @staticmethod
-    def get_hex_id(ip):
-        """Get hexadecimal value for a certain IP
-        :param: ip string"""
-        return ''.join(['%02x' % int(w) for w in ip.split('.')])
-
-    @staticmethod
-    def check_is_hex(hex_value):
-        try:
-            int(hex_value, 16)
-            return True
-        except ValueError:
-            return False
-
-    @staticmethod
-    def split_router_ids(router_id: str):
-        """Some router ids come without IP format, i.e. without dots in them;
-        convert these router_ids to IPs"""
-        router_id = str(router_id)
-        if '.' in router_id:
-            return router_id
-        router_groups = re.findall('...', router_id)
-        no_zero_groups = []
-        for group in router_groups:
-            if group.startswith('00'):
-                no_zero_groups.append(group[2:])
-            elif group.startswith('0'):
-                no_zero_groups.append(group[1:])
-            else:
-                no_zero_groups.append(group)
-        return '.'.join(no_zero_groups)
-
-    @staticmethod
-    def check_if_router_id_is_hex(router_id):
-        return router_id.isnumeric()
-
-    @staticmethod
-    def hex_to_ip(hex_ip):
-        hex_ip = hex_ip.strip("0")
-        addr_long = int(hex_ip, 16) & 0xFFFFFFFF
-        struct.pack("<L", addr_long)
-        return socket.inet_ntoa(struct.pack("<L", addr_long))
-
-    @staticmethod
-    def reverse_ip(reversed_ip):
-        l = reversed_ip.split(".")
-        return '.'.join(l[::-1])
-
-
-
-    ### Auxiliary methods
-
-    def ip_type(self, prefix):
-        ip = prefix.split("/")[0]
-        return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6"
-
-    def obtain_pid(self, router):
-        """Returns the hashed PID of the router passed as argument.
-        If the PID was already mapped, it uses a dictionary to access it.
- """ - tsn = int(datetime.timestamp(datetime.now())*1000000) - rid = self.get_hex_id(router) if not self.check_is_hex(router) else router - if rid not in self.ts.keys(): - self.ts[rid] = tsn - else: - tsn = self.ts[rid] - hash_r = hashlib.sha3_384((router + str(tsn)).encode()) - return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn)) - - def create_pid_name(self, lsa, descriptors, area_id): - """Creates partition ID. - with AS number + domain_id + area_id + hexadecimal router_id - """ - routers_id = [] - desc = lsa[descriptors] - for item in desc: - if "router-id" in item: - routers_id.append(item["router-id"]) - autonomous_systems = [item.get("autonomous-system") for item in desc] - domain_ids = [item.get("domain-id", 0) for item in desc] - for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id) - #pid_name = self.obtain_pid(router_id) - origin = (autonomous_system, domain_id, area_id, router_id) - if pid_name not in self.props: - self.props[pid_name] = [] - self.props[pid_name].append(origin) - - def _get_router_id_from_node_descript_list(self, node_descriptors, key: str): - result = [] - for descriptor in node_descriptors: - for key_d, value in descriptor.items(): - if key_d == key: - #print(value, key_d) - if self.check_if_router_id_is_hex(value): - result.append(self.split_router_ids(value)) - elif "." in value: - result.append(value) - else: - result.append(self.reverse_ip(self.hex_to_ip(value))) - return result - - def parseo_yang(self, mensaje, tipo): - return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}' - - - - ### Topology generation and information recopilation functions - - def load_topology(self, d_pids, l_ejes): - for pid in d_pids.keys(): - if d_pids[pid] not in self.topology.nodes(): - self.topology.add_node(d_pids[pid]) - self.pids[pid] = d_pids[pid] - for eje in l_ejes: - src, des, metric = eje.strip("() ").replace("'","").replace('"',"").split(",") - #print(src + '\t' + des + '\t' + str(metric)) - self.topology.add_edge(src, des, weight=int(metric)) - print("Topology loaded") - print(self.pids) - print(str(self.topology.edges())) - - - - def load_pid_prop(self, lsa, ls_area_id): - if 'node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id) - if 'local-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id) - if 'remote-node-descriptors' in lsa: - self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id) - - def load_pids(self, ipv4db): - # self.pids stores the result of networkmap - for rr_bgp in [RR_BGP_0]: - for prefix, data in ipv4db[rr_bgp]['ipv4'].items(): - pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop'])) - #pid_name = self.obtain_pid(data['next-hop']) - tipo=self.ip_type(prefix) - if pid_name not in self.pids: - self.pids[pid_name] = {} - if tipo not in self.pids[pid_name]: - self.pids[pid_name][tipo]=[] - if prefix not in self.pids[pid_name][tipo]: - self.pids[pid_name][tipo].append(prefix) - - def compute_costmap(self): - # shortest_paths is a dict by source and target that contains the shortest path length for - # that source and destination - shortest_paths = 
dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology)) - ctrl = 0 - for src, dest_pids in shortest_paths.items(): - for key in self.pids.keys(): - if self.pids[key] == src: - src_pid_name = key - ctrl = 1 - break - if ctrl == 0: - src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src)) - #src_pid_name = self.obtain_pid(src) - for dest_pid, weight in dest_pids.items(): - for key in self.pids.keys(): - if self.pids[key] == dest_pid: - dst_pid_name = key - ctrl = 1 - break - if ctrl == 0: - dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid)) - #dst_pid_name = self.obtain_pid(dest_pid) - if src_pid_name not in self.cost_map: - self.cost_map[src_pid_name] = {} - self.cost_map[src_pid_name][dst_pid_name] = weight - - - ### RFC7285 functions - def get_costs_map_by_pid(self, pid): - #pid = "pid0:" + str(npid) - #print(pid) - #print(str(self.pids)) - if pid in self.cost_map.keys(): - #print(str(self.pids)) - #print(str(self.cost_map)) - return self.resp.crear_respuesta("filtro", "networkmap-default", self.vtag, str(self.cost_map[pid])) - else: - return "404: Not Found" - - def get_properties(self, pid): - #return str(self.bf.session.q.nodeProperties().answer().frame()) - return "Implementation in proccess. Sorry dude" - - def get_endpoint_costs(self, pid): - return "Implementation in proccess. Sorry dude" - - def get_maps(self): - return ('{"pids_map":' + self.get_pids() + ', "costs_map":' + self.get_costs_map() + '}') - - def get_costs_map(self): - print(self.cost_map) - return self.resp.crear_respuesta("cost-map", "networkmap-default", self.vtag, str(self.cost_map)) - - def get_pids(self): - return self.resp.crear_respuesta("pid-map", "networkmap-default", self.vtag, str(self.pids)) - - def get_directory(self): - return self.resp.indice() - - ### Ampliation functions - - def shortest_path(self, a, b): - try: - return networkx.dijkstra_path(self.topology, a, b) - except networkx.exception.NetworkXNoPath as e: - return [] - except Exception as e: - print(e) - return (-1) - - def all_maps(self, topo, src, dst): - ''' - Returns all the diferent paths between src and dest without any edge in common. - The result is a list of paths (each path is represented as a char list, e.g. 
['a', 'c', 'd']) - Args: - topo: Topology map - src: node used as source - dst: node used as destination - ''' - map_aux = networkx.Graph(topo) - all_paths = [] - - sh_path = networkx.dijkstra_path(map_aux, src, dst) - while sh_path != []: - cost = 0 - nodo_s = sh_path[0] - for nodo_d in sh_path[1:]: - map_aux.remove_edge(nodo_s, nodo_d) - nodo_s = nodo_d - cost = cost + 1 - - all_paths.append({'path':sh_path, 'cost':cost}) - try: - sh_path = networkx.dijkstra_path(map_aux, src, dst) - except networkx.exception.NetworkXNoPath as e: - sh_path = [] - return all_paths - - def lanzadera(self): - threads = list() - for modulo in self.d_modules.keys(): - #print(modulo) - x = threading.Thread(target=self.gestiona_info, args=(modulo,))#, daemon=True) - threads.append(x) - x.start() - #x = threading.Thread(target=self.mailbox) - #threads.append(x) - #x.start() - return threads - - - def procesar(self, s_topo:str): - #print(s_topo) - try: - topo = json.loads(s_topo) - except Exception as e: - print(e) - print('\t' + s_topo) - return - - if topo['meta']['action'] == 1: - self.load_topology(topo['data']['pids'], topo['data']['costs-list']) - else: - self.eliminar(topo['data']['ejes']) - - self.compute_costmap() - - - ### Manager function - def gestiona_info(self, fuente): - if fuente in self.d_modules.keys(): - self.d_modules[fuente].manage_topology_updates() - - def mailbox(self, port=8080): - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.bind(('localhost',port)) - print("Waiting...") - while 1: - topo = s.recv(4096) - print("Recibido:") - topo = topo.decode() - self.procesar(topo) - # Aquà se deben de gestionar los datos recibidos - # Recibe una lista de nodos, una lista de ejes (con pesos), un indicador de la métrica pasada y la funete. - # Los nodos ya deben estar parseados según el AS. 
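The mailbox()/return_info() pair defines a small UDP protocol between the topology modules and TopologyCreator. The sketch below sends one well-formed update to the mailbox; the {"meta": {"action": 1}, "data": {...}} layout is inferred from procesar() above, and the PIDs and edge are made-up examples.

import json
import socket

update = {
    "meta": {"action": 1},  # 1 = load topology; anything else withdraws edges
    "data": {
        "pids": {"pid0:0a000001": "10.0.0.1", "pid0:0a000002": "10.0.0.2"},
        # each edge is a "(src,dst,metric)" string, as load_topology() expects
        "costs-list": ["('10.0.0.1','10.0.0.2',1)"],
    },
}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(json.dumps(update).encode(), ("localhost", 8080))  # mailbox default port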
-
-
-
-class TopologyFileWriter:
-
-    def __init__(self, output_path):
-        self.output_path = output_path
-        self.pid_file = 'pid_file.json'
-        self.cost_map_file = 'cost_map.json'
-        self.same_node_ips = "router_ids.json"
-
-    def write_file(self, file_name, content_to_write):
-        """Writes file_name in output_file"""
-        full_path = os.path.join(self.output_path, file_name)
-        with open(full_path, 'w') as out_file:
-            json.dump(content_to_write, out_file, indent=4)
-
-    def write_pid_file(self, content):
-        self.write_file(self.pid_file, content)
-
-    def write_cost_map(self, content):
-        self.write_file(self.cost_map_file, content)
-
-    def write_same_ips(self, content):
-        self.write_file(self.same_node_ips, content)
-
-
-
-if __name__ == '__main__':
-    # Creation of ALTO modules
-    modules = {}
-    modules['bgp'] = TopologyBGP(('localhost', 8080))
-    #modules['ietf'] = TopologyIetf(('localhost', 8081))
-
-    alto = TopologyCreator(modules, 0)
-    threads = list()
-
-    for modulo in modules.keys():
-        print(modulo)
-        x = threading.Thread(target=alto.gestiona_info, args=(modulo,))#, daemon=True)
-        threads.append(x)
-        x.start()
-
-    a = threading.Thread(target=alto.mailbox)
-    threads.append(a)
-    a.start()
-
-    app.run()
diff --git a/src/alto/service/pruebas/costmap b/src/alto/service/pruebas/costmap
deleted file mode 100644
index 7fb4f485e3f21ecb70cdf789aac5203b4d274750..0000000000000000000000000000000000000000
--- a/src/alto/service/pruebas/costmap
+++ /dev/null
@@ -1 +0,0 @@
-"{'meta':{'type':'alto-costmap+json','dependent-vtag':[{'resource-id':'networkmap-default','tag': '0'}],'cost-type': {'cost-mode' : 'numerical','cost-metric' : 'routingcost'}},'cost-map':{}}"
diff --git a/src/alto/service/realizar_git.sh b/src/alto/service/realizar_git.sh
deleted file mode 100644
index f89961f7c1f15ca0fa35979629c21505a7f1de12..0000000000000000000000000000000000000000
--- a/src/alto/service/realizar_git.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-git init
-git add $*
-git commit -m $( date +"%s" )
-git push origin main
diff --git a/src/alto/service/requirements.txt b/src/alto/service/requirements.txt
deleted file mode 100644
index 1b1d905bf8c16efed162d6b778907a5bc80deaed..0000000000000000000000000000000000000000
--- a/src/alto/service/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-flask==2.2.2
-kafka-python==2.0.2
-networkx==2.5
-Werkzeug==2.2.2
diff --git a/src/alto/service/simulador_ietf.py b/src/alto/service/simulador_ietf.py
deleted file mode 100644
index b75155e75cae18a94cde6ad4994c6277ef6f8853..0000000000000000000000000000000000000000
--- a/src/alto/service/simulador_ietf.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import re
-import random
-from time import sleep
-
-# Define variables
-n_cambios = 3
-topo = ""
-values = [10, 20, 30, 40, -1]
-comodin = '"metric1": "'
-
-def skere():
-    # read the IETF file
-    try:
-        in_file = open('/root/ietf_prueba.json', 'r')
-    except Exception as e:
-        print(e)
-        return
-    else:
-        topo = in_file.read()
-    finally:
-        in_file.close()
-
-    # loop: create 3 random numbers to select changes and 3 values in a list to apply them
-    while 1:
-        sleep(15)
-        l_aux = [m.start() for m in re.finditer(r'"metric1": "', topo)]
-        r_list = [random.randint(0, len(l_aux) - 1) for i in range(3)]
-        for rn in r_list:
-            # Find the rn-th occurrence of metric1 in the string
-            # Generate a random value from 0 to 4 and select the corresponding entry in values
-            # Substitute according to patterns (review how to apply regexes in Python)
-            rval = random.randint(0, 4)
-            topo = topo[:l_aux[rn]+12] + str(values[rval]) + topo[l_aux[rn]+14:]
-            print(str(rn))
-
-        # Overwrite ietf2
-        with open('/root/ietf2_prueba.json', 'w') as out_file:
-            err = out_file.write(topo)
-            out_file.close()
-
-
-
-skere()
diff --git a/src/alto/service/topology_maps_bgp.py b/src/alto/service/topology_maps_bgp.py
deleted file mode 100644
index 49966ee7cd3950e9a2576578427292033e33ec56..0000000000000000000000000000000000000000
--- a/src/alto/service/topology_maps_bgp.py
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-import json
-import re
-import networkx
-import socket
-import struct
-import hashlib
-
-from time import sleep
-from datetime import datetime
-sys.path.append('/home/ubuntu/docker-alto/network_exposure/')
-#from bgp.manage_bgp_speaker import ManageBGPSpeaker
-sys.path.append('alto-ale/')
-from kafka_ale.kafka_api import AltoProducer
-#from api_pybatfish import BatfishManager
-from yang_alto import RespuestasAlto
-from ipaddress import ip_address, IPv4Address
-
-DEFAULT_ASN = 0
-RR_BGP_0 = "50.50.50.1"
-#RR_BGP = BGP_INFO['bgp']['ip']
-
-
-class TopologyCreator:
-
-    def __init__(self, exabgp_process, mode):
-        self.exabgp_process = exabgp_process
-        self.props = {}
-        self.pids = {}
-        self.topology = networkx.Graph()
-        self.cost_map = {}
-        self.router_ids = []
-        # set path where to write result json files
-        self.topology_writer = TopologyFileWriter('/root/')
-        if mode:
-            self.kafka_p = AltoProducer("localhost", "9092")
-            #self.kafka_p = AltoProducer("localhost", "9093")
-        self.ts = {}
-        #self.bfm = BatfishManager()
-        self.vtag = 0
-        self.resp = RespuestasAlto()
-
-    ### Static Methods
-
-    @staticmethod
-    def discard_message_from_protocol_id(message, discard_protocols):
-        """Discard message if protocol is inside discard_protocols list"""
-        return message["protocol-id"] in discard_protocols
-
-    @staticmethod
-    def get_hex_id(ip):
-        """Get hexadecimal value for a certain IP
-        :param: ip string"""
-        return ''.join(['%02x' % int(w) for w in ip.split('.')])
-
-    @staticmethod
-    def check_is_hex(hex_value):
-        try:
-            int(hex_value, 16)
-            return True
-        except ValueError:
-            return False
-
-    @staticmethod
-    def split_router_ids(router_id: str):
-        """Some router ids come without IP format, i.e. without dots in them;
-        convert these router_ids to IPs"""
-        router_id = str(router_id)
-        if '.' in router_id:
-            return router_id
-        router_groups = re.findall('...', router_id)
-        no_zero_groups = []
-        for group in router_groups:
-            if group.startswith('00'):
-                no_zero_groups.append(group[2:])
-            elif group.startswith('0'):
-                no_zero_groups.append(group[1:])
-            else:
-                no_zero_groups.append(group)
-        return '.'.join(no_zero_groups)
-
-    @staticmethod
-    def check_if_router_id_is_hex(router_id):
-        return router_id.isnumeric()
-
-    @staticmethod
-    def hex_to_ip(hex_ip):
-        hex_ip = hex_ip.strip("0")
-        addr_long = int(hex_ip, 16) & 0xFFFFFFFF
-        struct.pack("<L", addr_long)
-        return socket.inet_ntoa(struct.pack("<L", addr_long))
-
-    @staticmethod
-    def reverse_ip(reversed_ip):
-        l = reversed_ip.split(".")
-        return '.'.join(l[::-1])
-
-
-
-    ### Auxiliary methods
-
-    def ip_type(self, prefix):
-        ip = prefix.split("/")[0]
-        return "IPv4" if type(ip_address(ip)) is IPv4Address else "IPv6"
-
-    def obtain_pid(self, router):
-        """Returns the hashed PID of the router passed as argument.
-        If the PID was already mapped, it uses a dictionary to access it.
-        """
-        tsn = int(datetime.timestamp(datetime.now())*1000000)
-        rid = self.get_hex_id(router) if not self.check_is_hex(router) else router
-        if rid not in self.ts.keys():
-            self.ts[rid] = tsn
-        else:
-            tsn = self.ts[rid]
-        hash_r = hashlib.sha3_384((router + str(tsn)).encode())
-        return ('pid%d:%s:%d' % (DEFAULT_ASN, hash_r.hexdigest()[:32], tsn))
-
-    def create_pid_name(self, lsa, descriptors, area_id):
-        """Creates partition ID
-        with AS number + domain_id + area_id + hexadecimal router_id
-        """
-        routers_id = []
-        desc = lsa[descriptors]
-        for item in desc:
-            if "router-id" in item:
-                routers_id.append(item["router-id"])
-        autonomous_systems = [item.get("autonomous-system") for item in desc]
-        domain_ids = [item.get("domain-id", 0) for item in desc]
-        for router_id, autonomous_system, domain_id in zip(routers_id, autonomous_systems, domain_ids):
-            pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(router_id) if not self.check_is_hex(router_id) else router_id)
-            #pid_name = self.obtain_pid(router_id)
-            origin = (autonomous_system, domain_id, area_id, router_id)
-            if pid_name not in self.props:
-                self.props[pid_name] = []
-            self.props[pid_name].append(origin)
-
-    def _get_router_id_from_node_descript_list(self, node_descriptors, key: str):
-        result = []
-        for descriptor in node_descriptors:
-            for key_d, value in descriptor.items():
-                if key_d == key:
-                    #print(value, key_d)
-                    if self.check_if_router_id_is_hex(value):
-                        result.append(self.split_router_ids(value))
-                    elif "." in value:
-                        result.append(value)
-                    else:
-                        result.append(self.reverse_ip(self.hex_to_ip(value)))
-        return result
-
-    def parseo_yang(self, mensaje, tipo):
-        return str(tipo) + 'json{"alto-tid":"1.0","time":' + str(datetime.timestamp(datetime.now())) + ',"host":"altoserver-alberto","' + str(tipo) + '":' + str(mensaje) + '},}'
-
-
-
-    ### Topology generation and information gathering functions
-
-    def load_topology(self, lsa, igp_metric):
-        if lsa.get('ls-nlri-type') == 'bgpls-link':
-            # Link information
-            src = self._get_router_id_from_node_descript_list(lsa['local-node-descriptors'], 'router-id')
-            dst = self._get_router_id_from_node_descript_list(lsa['remote-node-descriptors'], 'router-id')
-            for i, j in zip(src, dst):
-                self.topology.add_edge(i, j, weight=igp_metric)
-        if lsa.get('ls-nlri-type') == 'bgpls-prefix-v4':
-            # ToDo verify if prefix info is needed and not already provided by node-descriptors
-            # Node information. Groups origin with its prefixes
-            origin = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], "router-id")
-            prefix = self.split_router_ids(lsa['ip-reach-prefix'])
-            for item in origin:
-                if item not in self.topology.nodes():
-                    self.topology.add_node(item)
-                if 'prefixes' not in self.topology.nodes[item]:
-                    self.topology.nodes[item]['prefixes'] = []
-                self.topology.nodes[item]['prefixes'].append(prefix)
-        if lsa.get('ls-nlri-type') == "bgpls-node":
-            # If ls-nlri-type is not present or is not of type bgpls-link or bgpls-prefix-v4
-            # add node to topology if not present
-            node_descriptors = self._get_router_id_from_node_descript_list(lsa['node-descriptors'], 'router-id')
-            self.router_ids.append(node_descriptors)
-            for node_descriptor in node_descriptors:
-                if node_descriptor not in self.topology.nodes():
-                    self.topology.add_node(node_descriptor)
-
-    def load_pid_prop(self, lsa, ls_area_id):
-        if 'node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='node-descriptors', area_id=ls_area_id)
-        if 'local-node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='local-node-descriptors', area_id=ls_area_id)
-        if 'remote-node-descriptors' in lsa:
-            self.create_pid_name(lsa, descriptors='remote-node-descriptors', area_id=ls_area_id)
-
-    def load_pids(self, ipv4db):
-        # self.pids stores the result of the network map
-        for rr_bgp in [RR_BGP_0]:
-            for prefix, data in ipv4db[rr_bgp]['ipv4'].items():
-                pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(data['next-hop']))
-                #pid_name = self.obtain_pid(data['next-hop'])
-                tipo = self.ip_type(prefix)
-                if pid_name not in self.pids:
-                    self.pids[pid_name] = {}
-                if tipo not in self.pids[pid_name]:
-                    self.pids[pid_name][tipo] = []
-                if prefix not in self.pids[pid_name][tipo]:
-                    self.pids[pid_name][tipo].append(prefix)
-
-    def compute_costmap(self):
-        # shortest_paths is a dict by source and target that contains the shortest path length for
-        # that source and destination
-        shortest_paths = dict(networkx.shortest_paths.all_pairs_dijkstra_path_length(self.topology))
-        for src, dest_pids in shortest_paths.items():
-            src_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(src))
-            #src_pid_name = self.obtain_pid(src)
-            for dest_pid, weight in dest_pids.items():
-                dst_pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(dest_pid))
-                #dst_pid_name = self.obtain_pid(dest_pid)
-                if src_pid_name not in self.cost_map:
-                    self.cost_map[src_pid_name] = {}
-                self.cost_map[src_pid_name][dst_pid_name] = weight
-
-
-
-    ### RFC 7285 functions
-    def get_costs_map_by_pid(self, pid):
-        #pid = "pid0:" + str(npid)
-        #print(pid)
-        #print(str(self.pids))
-        if pid in self.cost_map.keys():
-            #print(str(self.pids))
-            #print(str(self.cost_map))
-            return self.resp.crear_respuesta("filtro", "networkmap-default", self.vtag, str(self.cost_map[pid]))
-        else:
-            return "404: Not Found"
-
-    def get_properties(self, pid):
-        #return str(self.bf.session.q.nodeProperties().answer().frame())
-        return "Implementation in progress. Sorry dude"
-
-    def get_endpoint_costs(self, pid):
-        return "Implementation in progress. Sorry dude"
-
-    def get_maps(self):
-        return ('{"pids_map":' + self.get_pids() + ', "costs_map":' + self.get_costs_map() + '}')
-
-    def get_costs_map(self):
-        return self.resp.crear_respuesta("cost-map", "networkmap-default", self.vtag, str(self.cost_map))
-
-    def get_pids(self):
-        return self.resp.crear_respuesta("pid-map", "networkmap-default", self.vtag, str(self.pids))
-
-    def get_directory(self):
-        return self.resp.indice()
-
-    ### Extension functions
-
-    def shortest_path(self, a, b):
-        try:
-            return networkx.dijkstra_path(self.topology, a, b)
-        except networkx.exception.NetworkXNoPath as e:
-            return []
-        except Exception as e:
-            print(e)
-            return (-1)
-
-    def all_maps(self, topo, src, dst):
-        '''
-        Returns all the different paths between src and dst without any edge in common.
-        The result is a list of paths (each path is represented as a char list, e.g. ['a', 'c', 'd'])
-        Args:
-            topo: topology map
-            src: node used as source
-            dst: node used as destination
-        '''
-        map_aux = networkx.Graph(topo)
-        all_paths = []
-
-        sh_path = networkx.dijkstra_path(map_aux, src, dst)
-        while sh_path != []:
-            cost = 0
-            nodo_s = sh_path[0]
-            for nodo_d in sh_path[1:]:
-                map_aux.remove_edge(nodo_s, nodo_d)
-                nodo_s = nodo_d
-                cost = cost + 1
-
-            all_paths.append({'path': sh_path, 'cost': cost})
-            try:
-                sh_path = networkx.dijkstra_path(map_aux, src, dst)
-            except networkx.exception.NetworkXNoPath as e:
-                sh_path = []
-        return all_paths
-
-
-
-    ### Manager function
-
-    def manage_bgp_speaker_updates(self, mode):
-        """
-        Reads the stdout of the exabgp process line by line.
-        Decoded UPDATE messages from exabgp are used to build the network map and cost map.
-        :return:
-        """
-        pids_to_load = {RR_BGP_0: {'ipv4': {}}}
-        while True:
-            line = self.exabgp_process.stdout.readline().strip()
-            if b'decoded UPDATE' in line and b'json' in line:
-                #print(line)
-                self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64]
-                decode_line = json.loads(line.split(b'json')[1])
-                neighbor_ip_address = decode_line['neighbor']['address']['peer']
-                update_msg = decode_line['neighbor']['message']['update']
-                if 'announce' in update_msg:
-                    is_bgp_ls = update_msg['announce'].get('bgp-ls bgp-ls')
-                    is_bgp = update_msg['announce'].get('ipv4 unicast')
-                    if 'attribute' in update_msg:
-                        ls_area_id = update_msg['attribute'].get('bgp-ls', {}).get('area-id', 0)
-                        igp_metric = update_msg['attribute'].get('bgp-ls', {}).get("igp-metric", 1)
-                    if is_bgp_ls:
-                        for next_hop_address, nlri in is_bgp_ls.items():
-                            for prefix in nlri:
-                                if self.discard_message_from_protocol_id(prefix, [4, 5]):
-                                    continue
-                                self.load_topology(prefix, igp_metric)
-                                self.load_pid_prop(prefix, ls_area_id)
-                    elif is_bgp:
-                        for next_hop, prefix in is_bgp.items():
-                            for nlri in prefix:
-                                pids_to_load[neighbor_ip_address]['ipv4'][nlri['nlri']] = {'next-hop': next_hop}
-                        self.load_pids(pids_to_load)
-
-                elif 'withdraw' in update_msg and 'bgp-ls bgp-ls' in update_msg['withdraw']:
-                    for route in update_msg['withdraw']['bgp-ls bgp-ls']:
-                        u = 0; v = 0
-                        for field, values in route.items():
-                            if field == "local-node-descriptors":
-                                for n in values:
-                                    for i, j in n.items():
-                                        if i == "router-id":
-                                            u = j
-                            elif field == "remote-node-descriptors":
-                                for n in values:
-                                    for i, j in n.items():
-                                        if i == "router-id":
-                                            v = j
-                        if u != 0 and v != 0:
-                            try:
-                                self.topology.remove_edge(self.split_router_ids(u), self.split_router_ids(v))
-                            except:
-                                print("Edge already removed.")
-
-                self.compute_costmap()
-                self.topology_writer.write_same_ips(self.router_ids)
-                self.topology_writer.write_pid_file(self.pids)
-                self.topology_writer.write_cost_map(self.cost_map)
-
-                if bool(self.cost_map):
-                    if mode:
-                        self.kafka_p.envio_alto('alto-costes', self.cost_map, 0)
-
-    def manage_ietf_speaker_updates(self):
-        '''
-        Receives topology information from the PCE via the Southbound Interface and creates/updates the graphs.
-        Performs an iterative analysis, reviewing each network: if two networks are the same but come from different protocols, they must be merged.
-        Three attributes on each network: dic[ips], dic[interfaces] and graph[links]
-        '''
-        # Dictionary node-id:name
-        nodos = {}
-        # Dictionary node-id:[(interface, ip)]
-        tps = {}
-        # List of links
-        links = []
-        full_path = os.path.join("/root/", "ietf_prueba.json")
-        with open(full_path, 'r') as archivo:
-            self.vtag = hashlib.sha3_384((str(int(datetime.timestamp(datetime.now())*1000000))).encode()).hexdigest()[:64]
-            #while True:
-            deluro = archivo.read()
-            d_json = json.loads(str(deluro))
-            #print("Type = " + str(type(d_json)) + "\nMessage = " + str(d_json))
-            ietf_networks = d_json["ietf-network:networks"]
-            if ietf_networks == '':
-                return
-            # Build a dictionary with all existing networks and iterate over it looking for the valid ones
-            for net in ietf_networks["network"]:
-                if "node" in net.keys() and "ietf-network-topology:link" in net.keys():
-                    for nodo in net["node"]:
-                        # Match the node IDs with the name and the prefix(es).
-                        nodos[nodo["node-id"]] = nodo["ietf-l3-unicast-topology:l3-node-attributes"]["name"]
-                        tps[nodo["node-id"]] = []
-                        if "ietf-network-topology:termination-point" in nodo.keys():
-                            for tp in nodo["ietf-network-topology:termination-point"]:
-                                tps[nodo["node-id"]].append(str(nodos[nodo["node-id"]]) + ' ' + str(tp["tp-id"]))
-                        pid_name = 'pid%d:%s' % (DEFAULT_ASN, self.get_hex_id(nodo["node-id"]))
-                        if pid_name not in self.pids:
-                            self.pids[pid_name] = {}
-                        if 'ipv4' not in self.pids[pid_name]:
-                            self.pids[pid_name]['ipv4'] = []
-                        if nodo['node-id'] not in self.pids[pid_name]['ipv4']:
-                            self.pids[pid_name]['ipv4'].append(nodo['node-id'])
-                        self.topology.add_node(nodo['node-id'])
-
-                    # Still to do: list the links and store them.
-                    for link in net["ietf-network-topology:link"]:
-                        a, b = link["link-id"].split(" - ")
-                        if a == '' or b == '':
-                            break
-                        a1 = a.split(' ')[0]
-                        b1 = b.split(' ')[0]
-                        for k in nodos.keys():
-                            if nodos[k] == a1:
-                                a = k
-                            elif nodos[k] == b1:
-                                b = k
-                        links.append(((a, b), link["ietf-l3-unicast-topology:l3-link-attributes"]["metric1"]))
-
-            # Once everything works, store it in a graph instead of in dictionaries. -> The nodes can already be added above.
-            # Right now everything works; what remains is to convert a, b to PIDs instead of node-ids.
-            for link in links:
-                self.topology.add_edge(link[0][0], link[0][1], weight=int(link[1]))
-
-            # We need to review which dictionaries are still required.
-            # Since BGP represents it as node-id - node-id, it may be important to unify the displayed representation. (done)
-            # What do we do with the interfaces? Show them on the edges, or is that unnecessary? Keep a list of links showing how they connect?
- print("Done") - self.compute_costmap() - self.topology_writer.write_same_ips(self.router_ids) - self.topology_writer.write_pid_file(self.pids) - self.topology_writer.write_cost_map(self.cost_map) - print(str(self.get_maps())) - - - -class TopologyFileWriter: - - def __init__(self, output_path): - self.output_path = output_path - self.pid_file = 'pid_file.json' - self.cost_map_file = 'cost_map.json' - self.same_node_ips = "router_ids.json" - - def write_file(self, file_name, content_to_write): - """Writes file_name in output_file""" - full_path = os.path.join(self.output_path, file_name) - with open(full_path, 'w') as out_file: - json.dump(content_to_write, out_file, indent=4) - - def write_pid_file(self, content): - self.write_file(self.pid_file, content) - - def write_cost_map(self, content): - self.write_file(self.cost_map_file, content) - - def write_same_ips(self, content): - self.write_file(self.same_node_ips, content) - - - -if __name__ == '__main__': - speaker_bgp = ManageBGPSpeaker() - exabgp_process = speaker_bgp.check_tcp_connection() - - topology_creator = TopologyCreator(exabgp_process,0) - topology_creator.manage_ietf_speaker_updates() diff --git a/src/alto/service/yang_alto.py b/src/alto/service/yang_alto.py deleted file mode 100644 index 8707be565bf6f59f7522f66949f692a08e835f00..0000000000000000000000000000000000000000 --- a/src/alto/service/yang_alto.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python3 - -import json -from datetime import datetime - - -class RespuestasAlto: - - def __init__(self): - self.algo=0 - - - def crear_respuesta(self, tipo, rid, vtag, contenido): - - if tipo == "cost-map": - return self.respuesta_costes(rid, vtag, contenido) - elif tipo == "pid-map": - return self.respuesta_pid(rid, vtag, contenido) - elif tipo == "filtro": - return self.respuesta_filtro(rid, vtag, contenido) - elif tipo == "prop": - return self.respuesta_prop(rid, vtag, contenido) - elif tipo == "endpoint-costs": - return self.respuestar_endpoint_costs(rid, vtag, contenido) - else: - return "" - - - def respuesta_costes(self, rid, vtag, costmap): - ''' - Return a json-YANG structure from a raw costmap. - Parameters: - rid: resource ID of the network map related - vtag: timestamp of the last network map - costmap: dict of PIDs and costs - ''' - - resp = "{'meta':{'type':'alto-costmap+json','dependent-vtag':[{'resource-id':'" + str(rid) + "','tag': '" + str(vtag) +"'}],'cost-type': {'cost-mode' : 'numerical','cost-metric' : 'routingcost'}},'cost-map':" + str(costmap) + "}" - - return resp - - - def respuesta_pid(self, rid, vtag, netmap): - ''' - Return a json-YANG structure from a raw networkmap. 
- Parameters: - rid: resource ID of the network map - vtag: timestamp of the network map - netmap: dict of PIDs and network reachables - ''' - - resp = "{'meta' : {'type':'alto-networkmap+json','vtag' : [{'resource-id':'" + str(rid) + "','tag':'" + str(vtag) +"'}]},'network-map':" + str(netmap) + "}" - - return resp - - - def respuesta_filtro(self, rid, vtag, fitro, mapa): - return "" - def respuesta_prop(self, rid, vtag, contenido): - return "" - def respuestar_endpoint_costs(self, rid, vtag, costmap): - return "" - - def indice(self): - return '''{"meta" : {"cost-types": {"num-routing": {"cost-mode" : "numerical","cost-metric": "routingcost","description": "My default"},"num-hop": {"cost-mode" : "numerical","cost-metric": "hopcount"},"ord-routing": {"cost-mode" : "ordinal","cost-metric": "routingcost"},"ord-hop": {"cost-mode" : "ordinal","cost-metric": "hopcount"}}},"resources" : {"network-map" : {"uri" : "http://localhost:5000/networkmap","media-type" : "application/alto-networkmap+json","uses": [ "networkmap-default" ]},"cost-map" : {"uri" : "http://localhost:5000/costmap","media-type" : "application/alto-costmap+json","capabilities" : {"cost-constraints" : true,"cost-type-names" : [ "num-routing", "num-hop","ord-routing", "ord-hop" ]},"uses": [ "networkmap-default" ]},"filtered-costs-map" : {"uri" : "http://localhost:5000//costmap/filter/<string:pid>","media-type" : "application/alto-networkmap+json","accepts" : "application/alto-networkmapfilter+json","uses": [ "networkmap-default" ]},"both-map" : {"uri" : "http://localhost:5000//maps","media-types" : ["application/alto-networkmap+json","application/alto-costmap+json"] ,"uses": [ "networkmap-default" ]}}}''' - -
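The response builders in RespuestasAlto return dict-style strings with single quotes rather than strict JSON. A short sketch for inspecting one of them as a real JSON document follows; it assumes yang_alto.py is on the import path, and the cost map content is a made-up example.

import ast
import json

from yang_alto import RespuestasAlto

resp = RespuestasAlto()
raw = resp.crear_respuesta("cost-map", "networkmap-default", "0",
                           "{'pid0:a': {'pid0:b': 1}}")
doc = ast.literal_eval(raw)       # parse the single-quoted dict text
print(json.dumps(doc, indent=2))  # an alto-costmap+json-shaped document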