diff --git a/src/bgpls_speaker/.gitlab-ci.yml b/src/bgpls_speaker/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4c461ce56dfe6baf1716eb6deb070288c848faa2
--- /dev/null
+++ b/src/bgpls_speaker/.gitlab-ci.yml
@@ -0,0 +1,224 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build service:
+  variables:
+    IMAGE_NAME: 'bgpls_speaker' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: build
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile .
+    - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+    - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+  after_script:
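+    # Remove dangling (untagged) images left over by the build; 'xargs -r' skips
+    # running 'docker rmi' when the list is empty.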
+    - docker images --filter="dangling=true" --quiet | xargs -r docker rmi
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+# Run unit tests on the component
+unit_test service:
+  variables:
+    IMAGE_NAME: 'bgpls_speaker' # name of the microservice
+    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+  stage: unit_test
+  needs:
+    - build service
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    - if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create --driver=bridge teraflowbridge; fi
+
+    # Context-related
+    # Context-related
+    - if docker container ls -a | grep crdb; then docker rm -f crdb; else echo "CockroachDB container is not in the system"; fi
+    - if docker volume ls | grep crdb; then docker volume rm -f crdb; else echo "CockroachDB volume is not in the system"; fi
+    - if docker container ls -a | grep nats; then docker rm -f nats; else echo "NATS container is not in the system"; fi
+
+    # Device-related
+    - if docker container ls -a | grep context; then docker rm -f context; else echo "context container is not in the system"; fi
+    - if docker container ls -a | grep device; then docker rm -f device; else echo "device container is not in the system"; fi
+
+    # Pathcomp-related
+    - if docker container ls -a | grep pathcomp-frontend; then docker rm -f pathcomp-frontend; else echo "pathcomp-frontend container is not in the system"; fi
+    - if docker container ls -a | grep pathcomp-backend; then docker rm -f pathcomp-backend; else echo "pathcomp-backend container is not in the system"; fi
+
+    # Service-related
+    - if docker container ls -a | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME container is not in the system"; fi
+
+  script:
+    - docker pull "cockroachdb/cockroach:latest-v22.2"
+    - docker pull "nats:2.9"
+    - docker pull "$CI_REGISTRY_IMAGE/context:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/device:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG"
+    - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
+
+    # Context preparation
+    - docker volume create crdb
+    - >
+      docker run --name crdb -d --network=teraflowbridge -p 26257:26257 -p 8080:8080
+      --env COCKROACH_DATABASE=tfs_test --env COCKROACH_USER=tfs --env COCKROACH_PASSWORD=tfs123
+      --volume "crdb:/cockroach/cockroach-data"
+      cockroachdb/cockroach:latest-v22.2 start-single-node
+    - >
+      docker run --name nats -d --network=teraflowbridge -p 4222:4222 -p 8222:8222
+      nats:2.9 --http_port 8222 --user tfs --pass tfs123
+    - echo "Waiting for initialization..."
+    - while ! docker logs crdb 2>&1 | grep -q 'finished creating default user \"tfs\"'; do sleep 1; done
+    - docker logs crdb
+    - while ! docker logs nats 2>&1 | grep -q 'Server is ready'; do sleep 1; done
+    - docker logs nats
+    - docker ps -a
+    - CRDB_ADDRESS=$(docker inspect crdb --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CRDB_ADDRESS
+    - NATS_ADDRESS=$(docker inspect nats --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $NATS_ADDRESS
+    - >
+      docker run --name context -d -p 1010:1010
+      --env "CRDB_URI=cockroachdb://tfs:tfs123@${CRDB_ADDRESS}:26257/tfs_test?sslmode=require"
+      --env "MB_BACKEND=nats"
+      --env "NATS_URI=nats://tfs:tfs123@${NATS_ADDRESS}:4222"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/context:$IMAGE_TAG
+    - CONTEXTSERVICE_SERVICE_HOST=$(docker inspect context --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $CONTEXTSERVICE_SERVICE_HOST
+
+    # Device preparation
+    - >
+      docker run --name device -d -p 2020:2020
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/device:$IMAGE_TAG
+    - DEVICESERVICE_SERVICE_HOST=$(docker inspect device --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $DEVICESERVICE_SERVICE_HOST
+
+    # PathComp preparation
+    - >
+      docker run --name pathcomp-backend -d -p 8081:8081
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/pathcomp-backend:$IMAGE_TAG
+    - PATHCOMP_BACKEND_HOST=$(docker inspect pathcomp-backend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $PATHCOMP_BACKEND_HOST
+    - sleep 1
+    - >
+      docker run --name pathcomp-frontend -d -p 10020:10020
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
+      --env "PATHCOMP_BACKEND_HOST=${PATHCOMP_BACKEND_HOST}"
+      --env "PATHCOMP_BACKEND_PORT=8081"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/pathcomp-frontend:$IMAGE_TAG
+    - sleep 1
+    - PATHCOMPSERVICE_SERVICE_HOST=$(docker inspect pathcomp-frontend --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}")
+    - echo $PATHCOMPSERVICE_SERVICE_HOST
+
+    # Service preparation
+    - >
+      docker run --name $IMAGE_NAME -d -p 3030:3030
+      --env "CONTEXTSERVICE_SERVICE_HOST=${CONTEXTSERVICE_SERVICE_HOST}"
+      --env "DEVICESERVICE_SERVICE_HOST=${DEVICESERVICE_SERVICE_HOST}"
+      --env "PATHCOMPSERVICE_SERVICE_HOST=${PATHCOMPSERVICE_SERVICE_HOST}"
+      --volume "$PWD/src/$IMAGE_NAME/tests:/opt/results"
+      --network=teraflowbridge
+      $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
+
+    # Check status before the tests
+    - sleep 5
+    - docker ps -a
+    - docker logs context
+    - docker logs device
+    - docker logs pathcomp-frontend
+    - docker logs pathcomp-backend
+    - docker logs $IMAGE_NAME
+
+    # Run the tests
+    - >
+      docker exec -i $IMAGE_NAME bash -c
+      "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report.xml"
+    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
+
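+  # The coverage regex below captures the percentage from the 'TOTAL' line printed by
+  # 'coverage report', e.g.: "TOTAL    1234    321    74%" (numbers are illustrative)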
+  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
+  after_script:
+    # Check status after the tests
+    - docker ps -a
+    - docker logs context
+    - docker logs device
+    - docker logs pathcomp-frontend
+    - docker logs pathcomp-backend
+    - docker logs $IMAGE_NAME
+
+    - docker rm -f $IMAGE_NAME
+    - docker rm -f pathcomp-frontend
+    - docker rm -f pathcomp-backend
+    - docker rm -f device
+    - docker rm -f context
+
+    - docker rm -f crdb nats
+    - docker volume rm -f crdb
+    - docker network rm teraflowbridge
+    - docker volume prune --force
+    - docker image prune --force
+
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/$IMAGE_NAME/**/*.{py,in,yml}
+      - src/$IMAGE_NAME/Dockerfile
+      - src/$IMAGE_NAME/tests/*.py
+      - manifests/${IMAGE_NAME}service.yaml
+      - .gitlab-ci.yml
+
+  artifacts:
+    when: always
+    reports:
+      junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report.xml
+
+## Deployment of the service in Kubernetes Cluster
+#deploy service:
+#  variables:
+#    IMAGE_NAME: 'bgpls_speaker' # name of the microservice
+#    IMAGE_TAG: 'latest' # tag of the container image (production, development, etc)
+#  stage: deploy
+#  needs:
+#    - unit_test service
+#    # - integ_test execute
+#  script:
+#    - 'sed -i "s/$IMAGE_NAME:.*/$IMAGE_NAME:$IMAGE_TAG/" manifests/${IMAGE_NAME}service.yaml'
+#    - kubectl version
+#    - kubectl get all
+#    - kubectl apply -f "manifests/${IMAGE_NAME}service.yaml"
+#    - kubectl get all
+#  # environment:
+#  #   name: test
+#  #   url: https://example.com
+#  #   kubernetes:
+#  #     namespace: test
+#  rules:
+#    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+#      when: manual    
+#    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+#      when: manual
diff --git a/src/bgpls_speaker/Config.py b/src/bgpls_speaker/Config.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/bgpls_speaker/Config.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/bgpls_speaker/Dockerfile b/src/bgpls_speaker/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..949f791df082b68ca98c8d37967565e1315502e0
--- /dev/null
+++ b/src/bgpls_speaker/Dockerfile
@@ -0,0 +1,118 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Maven install stage
+#
+# ----------------------------------------------
+FROM maven:3.6.0-jdk-11-slim AS build
+
+RUN mkdir -p /var/teraflow/java_speaker/
+WORKDIR /var/teraflow/java_speaker
+RUN mkdir -p protocols/
+COPY netphony-network-protocols/src protocols/src
+COPY netphony-network-protocols/pom.xml protocols/pom.xml
+
+# RUN mvn dependency:resolve
+# RUN mvn clean verify
+
+WORKDIR /var/teraflow/java_speaker/protocols
+RUN mvn clean install
+
+RUN mkdir -p /var/teraflow/java_speaker
+WORKDIR /var/teraflow/java_speaker
+
+COPY src/bgpls_speaker/service/resources/java/netphony-topology-dev src/
+COPY src/bgpls_speaker/service/resources/java/netphony-topology-dev/pom.xml pom.xml
+COPY src/bgpls_speaker/service/resources/java/netphony-topology-dev/target/generated-sources/ src/target/generated-sources/
+
+RUN mvn package -X
+# -------------------------------------------
+# jar created in  /netphony-topology-dev/target/bgp-ls-speaker-jar-with-dependencies.jar
+
+#
+# Stage 2
+#
+
+FROM python:3.9-slim
+
+
+# Install dependencies
+RUN apt-get --yes --quiet --quiet update && \
+    apt-get --yes --quiet --quiet install wget g++ && \
+    rm -rf /var/lib/apt/lists/*
+
+# Ensure Python output is unbuffered so logs are shown as they occur
+ENV PYTHONUNBUFFERED=1
+
+# Download the gRPC health probe
+RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \
+    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /bin/grpc_health_probe
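+# The binary can back a Kubernetes exec readiness/liveness probe; a sketch, assuming
+# the service exposes gRPC on port 3030 as in the CI setup:
+#   exec: { command: ["/bin/grpc_health_probe", "-addr=:3030"] }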
+
+# Get generic Python packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install --upgrade pip-tools
+
+# Install OpenJDK-11
+RUN apt-get update && \
+    apt-get install -y openjdk-11-jre-headless && \
+    apt-get clean;
+
+COPY --from=build /var/teraflow/java_speaker/target/topology-1.4.0-SNAPSHOT.jar /var/teraflow/java_speaker/
+
+# Get common Python packages
+# Note: this step enables sharing the previous Docker build steps among all the Python components
+WORKDIR /var/teraflow
+COPY common_requirements.in common_requirements.in
+RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in
+RUN python3 -m pip install -r common_requirements.txt
+
+# Add common files into working directory
+WORKDIR /var/teraflow/common
+COPY src/common/. ./
+RUN rm -rf proto
+
+# Create proto sub-folder, copy .proto files, and generate Python code
+RUN mkdir -p /var/teraflow/common/proto
+WORKDIR /var/teraflow/common/proto
+RUN touch __init__.py
+COPY proto/*.proto ./
+RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto
+RUN rm *.proto
+RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
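+# The sed above rewrites the generated absolute imports into relative ones so the
+# stubs work as a package, e.g. 'import context_pb2 as context__pb2' becomes
+# 'from . import context_pb2 as context__pb2'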
+
+# Create component sub-folders, get specific Python packages
+RUN mkdir -p /var/teraflow/bgpls_speaker
+WORKDIR /var/teraflow/bgpls_speaker
+COPY src/bgpls_speaker/requirements.in requirements.in
+RUN pip-compile --quiet --output-file=requirements.txt requirements.in
+RUN python3 -m pip install -r requirements.txt
+
+# Configuration files required by the Java BGP-LS speaker module
+WORKDIR /var/teraflow/bgpls_speaker
+RUN mkdir -p /resources
+COPY src/bgpls_speaker/service/resources/* /resources/
+
+# Add component files into working directory
+WORKDIR /var/teraflow
+COPY src/context/. context/
+COPY src/device/. device/
+COPY src/pathcomp/frontend/. pathcomp/frontend/
+COPY src/service/. service/
+COPY src/bgpls_speaker/. bgpls_speaker/
+
+# Start the service
+ENTRYPOINT ["python", "-m", "bgpls_speaker.service"]
diff --git a/src/bgpls_speaker/__init__.py b/src/bgpls_speaker/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/bgpls_speaker/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/bgpls_speaker/bgpls_deploy.sh b/src/bgpls_speaker/bgpls_deploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..63c1ada9d8bee9c033fc36a63d8df2ec5ac24769
--- /dev/null
+++ b/src/bgpls_speaker/bgpls_deploy.sh
@@ -0,0 +1,274 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, assume internal MicroK8s registry is used.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
+
+# Set the list of components, separated by spaces, to build images for and deploy.
+# This script builds and deploys only the bgpls_speaker component.
+export TFS_COMPONENTS="bgpls_speaker"
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# Set the name of the Kubernetes namespace to deploy the component to.
+export TFS_K8S_NAMESPACE="bgpls"
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+# If not already set, leave TFS_SKIP_BUILD empty so the Docker images are rebuilt.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt, retagged, or repushed, and existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-""}
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+# echo "Deleting and Creating a new namespace..."
+# kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
+# kubectl create namespace $TFS_K8S_NAMESPACE
+# printf "\n"
+
+echo "Deploying components and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
+echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
+PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+
+    echo "  Building Docker image..."
+    BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+    docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+
+    echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
+
+    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
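+    # The two sed passes collapse '//' and strip the 'http:' scheme, e.g. with the
+    # default registry: 'http://localhost:32000/tfs//bgpls_speaker:dev' becomes
+    # 'localhost:32000/tfs/bgpls_speaker:dev'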
+
+    echo " Image url '$IMAGE_URL'"
+
+    TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+    docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+    PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+    docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+
+    echo "  Adapting '$COMPONENT' manifest file..."
+    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
+    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+    echo " Manifest file '$MANIFEST'"
+    
+    IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+    VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
+    sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
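+    # VERSION is the image tag currently referenced in the manifest; the sed above swaps
+    # the GitLab registry reference for the freshly pushed local image, e.g.:
+    #   'image: labs.etsi.org:5050/tfs/controller/bgpls_speaker:dev' -> 'image: localhost:32000/tfs/bgpls_speaker:dev'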
+
+    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+
+    # TODO: harmonize names of the monitoring component
+
+    echo "  Deploying '$COMPONENT' component to Kubernetes..."
+    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
+    echo "  Namespace: '$TFS_K8S_NAMESPACE' "
+    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+    #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+
+    echo "  Collecting env-vars for '$COMPONENT' component..."
+
+    SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json)
+    if [ -z "${SERVICE_DATA}" ]; then continue; fi
+
+    # Env vars for service's host address
+    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
+    if [ -z "${SERVICE_HOST}" ]; then continue; fi
+    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
+    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
+
+    # Env vars for service's 'grpc' port (if any)
+    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
+    if [ -n "${SERVICE_PORT_GRPC}" ]; then
+        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
+    fi
+
+    # Env vars for service's 'http' port (if any)
+    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
+    if [ -n "${SERVICE_PORT_HTTP}" ]; then
+        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
+    fi
+
+    printf "\n"
+done
+
+echo "Deploying extra manifests..."
+for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
+    echo "Processing manifest '$EXTRA_MANIFEST'..."
+    if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then
+        kubectl apply -f $EXTRA_MANIFEST
+    else
+        kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
+    fi
+    printf "\n"
+done
+printf "\n"
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Waiting for '$COMPONENT' component..."
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    kubectl wait --namespace $TFS_K8S_NAMESPACE \
+        --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
+    printf "\n"
+done
+
+if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
+    echo "Configuring WebUI DataStores and Dashboards..."
+    sleep 5
+
+    # Exposed through the ingress controller "tfs-ingress"
+    GRAFANA_URL="127.0.0.1:80/grafana"
+
+    # Default Grafana credentials
+    GRAFANA_USERNAME="admin"
+    GRAFANA_PASSWORD="admin"
+
+    # Configure Grafana Admin Password
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
+    GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}"
+
+    echo ">> Updating Grafana 'admin' password..."
+    curl -X PUT -H "Content-Type: application/json" -d '{
+        "oldPassword": "'${GRAFANA_PASSWORD}'",
+        "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
+        "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
+    }' ${GRAFANA_URL_DEFAULT}/api/user/password
+    echo
+    echo
+
+    # Updated Grafana API URL
+    GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
+    echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
+
+    echo ">> Installing Scatter Plot plugin..."
+    curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \
+        ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install
+    echo
+
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
+    QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
+    echo ">> Creating datasources..."
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-mon-kpi",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_MONITORING_KPIS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": true,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-slc-grp",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_SLICE_GROUPS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
+    echo ">> Creating dashboards..."
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    printf "\n\n"
+
+    echo ">> Staring dashboards..."
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    printf "\n\n"
+fi
diff --git a/src/bgpls_speaker/client/BgplsClient.py b/src/bgpls_speaker/client/BgplsClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..504fed890f75ee98fcba3afbeaf8f1e445fed94b
--- /dev/null
+++ b/src/bgpls_speaker/client/BgplsClient.py
@@ -0,0 +1,91 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import Empty
+from common.proto.bgpls_pb2_grpc import BgplsServiceStub
+from common.proto.bgpls_pb2 import BgplsSpeaker, DiscoveredDeviceList, DiscoveredLinkList, BgplsSpeakerId, NodeDescriptors
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
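+# Minimal usage sketch (assumes the BGPLS service host/port resolve through the
+# standard TFS environment variables read by get_service_host/get_service_port_grpc):
+#   client = BgplsClient()
+#   speakers = client.ListBgplsSpeakers(Empty())
+#   client.close()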
+
+class BgplsClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.BGPLS)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.BGPLS)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.info('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.info('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = BgplsServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def ListDiscoveredDevices(self, request: Empty) -> DiscoveredDeviceList:
+        LOGGER.info('ListDiscoveredDevices request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.ListDiscoveredDevices(request)
+        LOGGER.info('ListDiscoveredDevices result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def ListDiscoveredLinks(self, request: Empty) -> DiscoveredLinkList:
+        LOGGER.info('ListDiscoveredLinks request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.ListDiscoveredLinks(request)
+        LOGGER.info('ListDiscoveredLinks result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def AddBgplsSpeaker(self, request: BgplsSpeaker) -> str:
+        LOGGER.info('AddBgplsSpeaker request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.AddBgplsSpeaker(request)
+        LOGGER.info('AddBgplsSpeaker result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def ListBgplsSpeakers(self, request: Empty) -> BgplsSpeakerId:
+        LOGGER.info('ListBgplsSpeakers request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.ListBgplsSpeakers(request)
+        LOGGER.info('ListBgplsSpeakers result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def DisconnectFromSpeaker(self, request: BgplsSpeaker) -> bool:
+        LOGGER.info('DisconnectFromSpeaker request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.DisconnectFromSpeaker(request)
+        LOGGER.info('DisconnectFromSpeaker result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def GetSpeakerInfoFromId(self, request: BgplsSpeakerId) -> BgplsSpeaker:
+        LOGGER.info('GetSpeakerInfoFromId request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetSpeakerInfoFromId(request)
+        LOGGER.info('GetSpeakerInfoFromId result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def NotifyAddNodeToContext(self, request: NodeDescriptors) -> str:
+        LOGGER.info('NotifyAddNodeToContext request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.NotifyAddNodeToContext(request)
+        LOGGER.info('NotifyAddNodeToContext result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
\ No newline at end of file
diff --git a/src/bgpls_speaker/client/__init__.py b/src/bgpls_speaker/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/bgpls_speaker/client/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/bgpls_speaker/quick_deploy.sh b/src/bgpls_speaker/quick_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..73c5e9881c4516fde24a936d649aa6f82193d08b
--- /dev/null
+++ b/src/bgpls_speaker/quick_deploy.sh
@@ -0,0 +1,438 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, assume internal MicroK8s registry is used.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
+
+# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
+# By default, only basic components are deployed
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device automation monitoring pathcomp service slice compute webui load_generator"}
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+# If not already set, the skip-build flag defaults to "YES" so existing images are reused.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt, retagged, or repushed, and existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-"YES"}
+
+# If TFS_SKIP_BUILD is "YES", select the containers to be build
+# Any other container will use previous docker images
+export TFS_QUICK_COMPONENTS="compute"
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# If not already set, set the namespace where NATS will be deployed.
+export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used for QuestDB.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used for QuestDB.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+echo "Deleting and Creating a new namespace..."
+kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
+kubectl create namespace $TFS_K8S_NAMESPACE
+printf "\n"
+
+echo "Create secret with CockroachDB data"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with NATS data"
+NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \
+    --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
+printf "\n"
+
+echo "Create secret with QuestDB data"
+QDB_HTTP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+QDB_ILP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="ilp")].port}')
+QDB_SQL_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+METRICSDB_HOSTNAME="questdb-public.${QDB_NAMESPACE}.svc.cluster.local"
+kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=QDB_NAMESPACE=${QDB_NAMESPACE} \
+    --from-literal=METRICSDB_HOSTNAME=${METRICSDB_HOSTNAME} \
+    --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \
+    --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \
+    --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \
+    --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \
+    --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \
+    --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \
+    --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD}
+printf "\n"
+
+echo "Deploying components and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
+echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
+PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+
+    if [ "$TFS_SKIP_BUILD" != "YES" ]; then
+        echo "  Building Docker image..."
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+        if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+            docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+        elif [ "$COMPONENT" == "pathcomp" ]; then
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
+            docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
+
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
+            docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
+            # next command is redundant, but helpful to keep cache updated between rebuilds
+            IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
+            docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
+            docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
+
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
+            docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
+        else
+            docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+        fi
+
+        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
+
+        if [ "$COMPONENT" == "pathcomp" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
+            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
+            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
+            docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
+            docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        else
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+            docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        fi
+    else
+        for QUICK_COMPONENT in $TFS_QUICK_COMPONENTS; do
+            if [ "$COMPONENT" == "$QUICK_COMPONENT" ]; then
+                echo "  Building Docker image..."
+                BUILD_LOG="$TMP_LOGS_FOLDER/build_${QUICK_COMPONENT}.log"
+
+                docker build -t "$QUICK_COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$QUICK_COMPONENT"/Dockerfile . > "$BUILD_LOG"
+
+                echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
+
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$QUICK_COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${QUICK_COMPONENT}.log"
+                docker tag "$QUICK_COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${QUICK_COMPONENT}.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            fi
+        done
+    fi
+
+    echo "  Adapting '$COMPONENT' manifest file..."
+    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
+    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+
+    if [ "$COMPONENT" == "pathcomp" ]; then
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    elif [ "$COMPONENT" == "dlt" ]; then
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    else
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    fi
+
+    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+
+    # TODO: harmonize names of the monitoring component
+
+    echo "  Deploying '$COMPONENT' component to Kubernetes..."
+    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
+    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+    #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+
+    echo "  Collecting env-vars for '$COMPONENT' component..."
+
+    SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json)
+    if [ -z "${SERVICE_DATA}" ]; then continue; fi
+
+    # Env vars for service's host address
+    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
+    if [ -z "${SERVICE_HOST}" ]; then continue; fi
+    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
+    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
+
+    # Env vars for service's 'grpc' port (if any)
+    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
+    if [ -n "${SERVICE_PORT_GRPC}" ]; then
+        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
+    fi
+
+    # Env vars for service's 'http' port (if any)
+    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
+    if [ -n "${SERVICE_PORT_HTTP}" ]; then
+        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
+    fi
+
+    printf "\n"
+done
+
+echo "Deploying extra manifests..."
+for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
+    echo "Processing manifest '$EXTRA_MANIFEST'..."
+    if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then
+        kubectl apply -f $EXTRA_MANIFEST
+    else
+        kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
+    fi
+    printf "\n"
+done
+printf "\n"
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Waiting for '$COMPONENT' component..."
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    kubectl wait --namespace $TFS_K8S_NAMESPACE \
+        --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
+    printf "\n"
+done
+
+if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
+    echo "Configuring WebUI DataStores and Dashboards..."
+    sleep 5
+
+    # Exposed through the ingress controller "tfs-ingress"
+    GRAFANA_URL="127.0.0.1:80/grafana"
+
+    # Default Grafana credentials
+    GRAFANA_USERNAME="admin"
+    GRAFANA_PASSWORD="admin"
+
+    # Configure Grafana Admin Password
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
+    GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}"
+
+    echo ">> Updating Grafana 'admin' password..."
+    curl -X PUT -H "Content-Type: application/json" -d '{
+        "oldPassword": "'${GRAFANA_PASSWORD}'",
+        "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
+        "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
+    }' ${GRAFANA_URL_DEFAULT}/api/user/password
+    echo
+    echo
+
+    # Updated Grafana API URL
+    GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
+    echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
+
+    echo ">> Installing Scatter Plot plugin..."
+    curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \
+        ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install
+    echo
+
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
+    QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
+    echo ">> Creating datasources..."
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-mon-kpi",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_MONITORING_KPIS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": true,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-slc-grp",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_SLICE_GROUPS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
+    echo ">> Creating dashboards..."
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    printf "\n\n"
+
+    echo ">> Staring dashboards..."
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    printf "\n\n"
+fi
diff --git a/src/bgpls_speaker/requirements.in b/src/bgpls_speaker/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..7fd1019f8db73f7de0383a613de6c160c0b4dd7e
--- /dev/null
+++ b/src/bgpls_speaker/requirements.in
@@ -0,0 +1,29 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+anytree==2.8.0
+APScheduler==3.8.1
+#fastcache==1.1.0
+ncclient==0.6.13
+python-json-logger==2.0.2
+pytz==2021.3
+xmltodict==0.12.0
+
+
+# pip's dependency resolver does not take installed packages into account.
+# p4runtime does not specify the version of grpcio/protobuf it needs, so it tries to install the latest one;
+# grpcio==1.47.* and protobuf==3.20.* are pinned here again with explicit versions to prevent collisions.
+grpcio==1.47.*
+protobuf==3.20.*
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/BgplsService.py b/src/bgpls_speaker/service/BgplsService.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ac4d1c5bab56cfea2a0457893691175a5dfdfbb
--- /dev/null
+++ b/src/bgpls_speaker/service/BgplsService.py
@@ -0,0 +1,31 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from bgpls_speaker.service.tools.DiscoveredDBManager import DiscoveredDBManager
+from bgpls_speaker.service.tools.GrpcServer import GrpcServer
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_port_grpc
+from common.proto.bgpls_pb2_grpc import add_BgplsServiceServicer_to_server
+from common.tools.service.GenericGrpcService import GenericGrpcService
+from .BgplsServiceServicerImpl import BgplsServiceServicerImpl
+
+class BgplsService(GenericGrpcService):
+    def __init__(self, discoveredDB : DiscoveredDBManager,
+                 speakerServer : GrpcServer, cls_name : str = __name__) -> None:
+        port = get_service_port_grpc(ServiceNameEnum.BGPLS) # enum defined in common.Constants
+        super().__init__(port, cls_name=cls_name)
+        self.bgpls_servicer = BgplsServiceServicerImpl(discoveredDB,speakerServer)
+
+    def install_servicers(self):
+        add_BgplsServiceServicer_to_server(self.bgpls_servicer, self.server)
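+
+# Minimal wiring sketch (mirrors what service/__main__.py does; for reference
+# only, not executed here):
+#
+#     db = DiscoveredDBManager()
+#     speaker_server = GrpcServer(db)
+#     speaker_server.Connect()
+#     bgpls_service = BgplsService(db, speaker_server)
+#     bgpls_service.start()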
diff --git a/src/bgpls_speaker/service/BgplsServiceServicerImpl.py b/src/bgpls_speaker/service/BgplsServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f09e998a77596477df1652f549d7a0fb3367899
--- /dev/null
+++ b/src/bgpls_speaker/service/BgplsServiceServicerImpl.py
@@ -0,0 +1,185 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, json, logging
+from typing import Optional
+from bgpls_speaker.service.tools.DiscoveredDBManager import DiscoveredDBManager, GetContextDevices
+from bgpls_speaker.service.tools.GrpcServer import GrpcServer
+from bgpls_speaker.service.tools.json_loader import getInterfaceFromJson, getInterfaceFromNodeNames
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.method_wrappers.ServiceExceptions import AlreadyExistsException, InvalidArgumentException
+from common.proto.context_pb2 import (
+    ContextId, Device, DeviceDriverEnum, DeviceId, Empty, EndPointId, Link, LinkId,
+    Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyId, Uuid)
+from common.proto.pathcomp_pb2 import PathCompRequest
+from common.proto.service_pb2_grpc import ServiceServiceServicer
+
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from pathcomp.frontend.client.PathCompClient import PathCompClient
+
+from common.proto.bgpls_pb2 import (BgplsSpeaker,BgplsSpeakerId, DiscoveredDeviceList,DiscoveredDevice,
+                                    DiscoveredLink,DiscoveredLinkList,NodeDescriptors,BgplsSpeakerList)
+from common.proto.bgpls_pb2_grpc import BgplsServiceServicer
+
+# from .task_scheduler.TaskScheduler import TasksScheduler
+# from .tools.ContextGetters import get_service
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'RPC')
+
+class BgplsServiceServicerImpl(BgplsServiceServicer):
+    def __init__(self,discoveredDB : DiscoveredDBManager,
+                 speakerServer : GrpcServer) -> None:
+        LOGGER.debug('Creating Servicer...')
+        self.speaker_handler_factory = 1
+        self.speaker_server=speakerServer
+        self.discoveredDB=discoveredDB
+        LOGGER.debug('Servicer Created')
+
+    # Methods that send the topology via gRPC
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListDiscoveredDevices(self, request : Empty, context : grpc.ServicerContext) -> DiscoveredDeviceList:
+        """
+        Returns the devices discovered via the BGP-LS protocol.
+        """
+        device_names=self.discoveredDB.GetNodeNamesFromDiscoveredDB()
+        # devices_names = [DiscoveredDevice(nodeName=device_name) for device_name in device_names]
+        
+        nodes = self.discoveredDB.GetNodesFromDiscoveredDB()
+        devices = [DiscoveredDevice(nodeName=node.node_name,igpID=node.igp_id,learntFrom=node.learnt_from) for node in nodes]
+
+        # LOGGER.debug("(ListDiscoveredDevices) Get discoveredDB manager %s",devices)
+        return DiscoveredDeviceList(discovereddevices=devices)
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListDiscoveredLinks(self, request : Empty, context : grpc.ServicerContext) -> DiscoveredLinkList:
+        """
+        Returns the links discovered via the BGP-LS protocol.
+        """
+        self.discoveredDB.UpdateNodeNameInLink()
+        links = self.discoveredDB.GetLinksFromDiscoveredDB()
+        links_info=[]
+        for link in links:
+            local=NodeDescriptors(bgplsID=link.local.bgpls_id,nodeName=link.local.node_name)
+            remote=NodeDescriptors(bgplsID=link.remote.bgpls_id,nodeName=link.remote.node_name)
+            links_info.append(DiscoveredLink(local=local,remote=remote,learntFrom=link.learnt_from))
+
+        # LOGGER.debug("(ListDiscoveredLinks) Get discoveredDB manager %s",links_info)
+        return DiscoveredLinkList(discoveredlinks=links_info)
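+
+    # Hedged client-side sketch (assumes the stub generated from the bgpls proto
+    # is named BgplsServiceStub; that generated file is not part of this diff):
+    #
+    #     stub = BgplsServiceStub(channel)
+    #     reply = stub.ListDiscoveredLinks(Empty())
+    #     for link in reply.discoveredlinks:
+    #         print(link.local.nodeName, '<-->', link.remote.nodeName)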
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def AddBgplsSpeaker(self, request : BgplsSpeaker, context : grpc.ServicerContext) -> BgplsSpeakerId:
+        """
+        Creates a new connection with an speaker with the given ip address, port and as.
+        Returns de id of the speaker created (to kill proccess¿)
+        """
+        LOGGER.debug("(AddBgplsSpeaker) Create speaker instance %s",request)
+
+        speaker_id=self.speaker_server.connectToJavaBgpls(request.address,request.port,request.asNumber)
+        return BgplsSpeakerId(id=speaker_id)
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListBgplsSpeakers(self, request : Empty, context : grpc.ServicerContext) -> BgplsSpeakerList:
+        """
+        Returns a list of the ids of the BGP-LS speakers with open connections.
+        """
+        speaker_list=[]
+        bgpls_speaker_list=[]
+        speaker_list=self.speaker_server.getSpeakerListIds()
+        # if(speaker_list==None)
+        #     return
+
+        for speaker in speaker_list:
+            bgpls_speaker_list.append(BgplsSpeakerId(id=speaker))
+
+        return BgplsSpeakerList(speakers=bgpls_speaker_list)
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def DisconnectFromSpeaker(self, request : BgplsSpeaker, context : grpc.ServicerContext) -> Empty:
+        """
+        Disconnects from the BGP-LS speaker given its IPv4 address.
+        """
+        speaker_id=self.speaker_server.getSpeakerIdFromIpAddr(request.address)
+        self.speaker_server.terminateRunnerById(speaker_id)
+        return Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetSpeakerInfoFromId(self, request : BgplsSpeakerId, context : grpc.ServicerContext) -> BgplsSpeaker:
+        """
+        Gets the address, port and AS number of the speaker given its id.
+        """
+        address,as_number,port=self.speaker_server.getSpeakerFromId(request.id)
+        return BgplsSpeaker(address=address,port=port,asNumber=as_number)
+    
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def NotifyAddNodeToContext(self, request : DiscoveredDevice, context : grpc.ServicerContext) :
+        """
+        When a node is added to context via bgpls module this function checks if there are other nodes in the
+        topology connected by links discovered via bgpls. Then, if the link exist adds it to the context.
+        TODO: get endpoints from pce module
+        """
+        node_name=request.nodeName
+        LOGGER.debug("(NotifyAddNodeToContext) Find links to nodes ")
+        nodes_conected=self.discoveredDB.FindConnectedNodes(node_name)
+        # Check if nodes are in context
+        context_client=ContextClient()
+        context_client.connect()
+        # devices=context_client.ListDevices(Empty())
+        device_names,devices_ips=GetContextDevices(context_client)
+        LOGGER.debug("(NotifyAddNodeToContext) Devices in context: %s", device_names)
+        
+        nodes_conected_in_context=list(set(nodes_conected) & set(device_names))
+        LOGGER.debug("(NotifyAddNodeToContext) nodes_conected_in_context: %s", nodes_conected_in_context)
+        # TODO: next to function
+        for remote_node in nodes_conected_in_context:
+
+            # TODO: get endpoints connected to the remote IP (PCE?)
+            end_point1="eth-1/0/20"
+            end_point2="eth-1/0/20"
+
+            data=getInterfaceFromJson()
+            # LOGGER.debug ("(NotifyAddNodeToContext) data json loader: %s ",data)
+            interface_src,interface_dst=getInterfaceFromNodeNames(data,node_name,remote_node)
+            LOGGER.debug ("(NotifyAddNodeToContext) interfaces: %s ---> %s ",interface_src,interface_dst)
+
+            # End of endpoints TODO
+
+            end_point_uuid1=Uuid(uuid=end_point1)
+            end_point_uuid2=Uuid(uuid=end_point2)
+
+            link_name_src_dest=node_name+"/"+end_point1+"=="+remote_node+"/"+end_point2
+            device_uuid_src=DeviceId(device_uuid=Uuid(uuid=node_name))
+
+            link_name_dest_src=remote_node+"/"+end_point2+"=="+node_name+"/"+end_point1
+            device_uuid_dest=DeviceId(device_uuid=Uuid(uuid=remote_node))
+            
+
+            end_point_id1=EndPointId(endpoint_uuid=end_point_uuid1,device_id=device_uuid_src)
+            end_point_id2=EndPointId(endpoint_uuid=end_point_uuid2,device_id=device_uuid_dest)
+
+            end_point_ids_src_dest=[end_point_id1,end_point_id2]
+            end_point_ids_dest_src=[end_point_id2,end_point_id1]
+
+            link_id_src=context_client.SetLink(Link(link_id=LinkId(link_uuid=Uuid(uuid=link_name_src_dest)),
+                                                    link_endpoint_ids=end_point_ids_src_dest))
+            
+            link_id_dst=context_client.SetLink(Link(link_id=LinkId(link_uuid=Uuid(uuid=link_name_dest_src)),
+                                                    link_endpoint_ids=end_point_ids_dest_src))
+
+            LOGGER.debug("(NotifyAddNodeToContext) Link set id src--->dst: %s", link_id_src)
+        # self.discoveredDB.DeleteNodeFromDiscoveredDB(node_name)
+        context_client.close()
+        return Empty()
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/__init__.py b/src/bgpls_speaker/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/bgpls_speaker/service/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/bgpls_speaker/service/__main__.py b/src/bgpls_speaker/service/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6c8296a4c15647ca761742245a2550aadb0c988
--- /dev/null
+++ b/src/bgpls_speaker/service/__main__.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading, time
+from prometheus_client import start_http_server
+from common.Constants import ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port,
+    wait_for_environment_variables)
+from context.client.ContextClient import ContextClient
+from .tools.DiscoveredDBManager import DiscoveredDBManager
+from .BgplsService import BgplsService
+from .tools.JavaRunner import JavaRunner
+from .tools.GrpcServer import GrpcServer
+
+
+# from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
+# from .service_handlers import SERVICE_HANDLERS
+
+terminate = threading.Event()
+LOGGER : logging.Logger = None
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received') # also close the Java processes
+    LOGGER.warning(signal)
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+    # wait_for_environment_variables([
+    #     get_env_var_name(ServiceNameEnum.CONTEXT,  ENVVAR_SUFIX_SERVICE_HOST     ),
+    #     get_env_var_name(ServiceNameEnum.CONTEXT,  ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+    #     get_env_var_name(ServiceNameEnum.DEVICE,   ENVVAR_SUFIX_SERVICE_HOST     ),
+    #     get_env_var_name(ServiceNameEnum.DEVICE,   ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+    #     get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST     ),
+    #     get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC),
+    # ])
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+    # One common database shared by all bgpls_speaker connections
+    DB=DiscoveredDBManager()
+    
+    speaker_server = GrpcServer(DB)
+    speaker_server.Connect()
+    speaker_server.connectToJavaBgpls("10.95.90.76")
+
+    grpc_service = BgplsService(DB,speaker_server)
+    grpc_service.start()
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=0.1): pass
+    LOGGER.info('Terminating...')
+    speaker_server.terminateGrpcServer()
+    grpc_service.stop()
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/bgpls_speaker/service/resources/.gitignore b/src/bgpls_speaker/service/resources/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..2b5b17ab59e88d44c5704004131d4f5bab1bd4b5
--- /dev/null
+++ b/src/bgpls_speaker/service/resources/.gitignore
@@ -0,0 +1,6 @@
+java/*
+java/**
+java/**/*
+java/**/**/*
+java
+/java/netphony-topology-dev/*
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/resources/BGP4Parameters_3.xml b/src/bgpls_speaker/service/resources/BGP4Parameters_3.xml
new file mode 100644
index 0000000000000000000000000000000000000000..bb74215ee0e991cfa7da8e17ab0624bea9e865f7
--- /dev/null
+++ b/src/bgpls_speaker/service/resources/BGP4Parameters_3.xml
@@ -0,0 +1,51 @@
+<config>
+	<!-- TCP port where the BGP is listening for incoming bgp4 connections. Optional Parameter. Default value: 179 (BGP Port)  --> 
+	<BGP4Port>12179</BGP4Port>
+	<BGPIdentifier>1.1.1.1</BGPIdentifier>
+<!-- TCP port to connect to manage the BGP connection. Default value: 1112 -->
+	<BGP4ManagementPort>1112</BGP4ManagementPort> 
+	<!-- Peers to which this Peer is going to establish connection -->
+    <configPeer>
+                <peer>10.95.86.214</peer>
+                <export>false</export>
+                <import>true</import>
+                <peerPort>179</peerPort>
+    </configPeer>
+
+	<!-- Log files (server, PCEP and OSPF protocols). Optional fields -->
+	<BGP4LogFile>BGP4Parser2.log</BGP4LogFile><!-- Default value: BGP4Parser.log -->
+	<BGP4LogFileClient>BGP4Client2.log</BGP4LogFileClient><!-- Default value: BGP4Client.log-->
+	<BGP4LogFileServer>BGP4Server2.log</BGP4LogFileServer><!-- Default value: BGP4Server.log-->
+	<!-- If the tcp no delay option is used or not. Optional Parameter. Default value: false. -->
+	<nodelay>true</nodelay>
+	<!-- Waiting Time to re-connect to clients. Default value: 6000 ms. -->
+	<delay>40000</delay>
+	<setTraces>true</setTraces>
+	<!-- OPEN Parameters -->
+	<!-- RFC 4271. This 2-octet unsigned integer indicates the number of seconds the sender proposes for the value of the Hold Timer.  
+		Upon receipt of an OPEN message, a BGP speaker MUST calculate the value of the Hold Timer by using the smaller of its configured
+         Hold Time and the Hold Time received in the OPEN message.  The Hold Time MUST be either zero or at least three seconds.  An
+         implementation MAY reject connections on the basis of the Hold Time.  The calculated value indicates the maximum number of
+         seconds that may elapse between the receipt of successive KEEPALIVE and/or UPDATE messages from the sender. -->
+	<holdTime>180</holdTime><!-- Optional Parameter. Default value: 3. -->
+	<!-- RFC 4271. This 1-octet unsigned integer indicates the protocol version number of the message.  The current BGP version number is 4. -->
+	<version>4</version><!-- Optional Parameter. Default value: 4. -->
+	<!-- RFC 4271.  This 2-octet unsigned integer indicates the Autonomous System number of the sender.-->
+	<myAutonomousSystem>65006</myAutonomousSystem>
+	<!-- RFC 4271. This 4-octet unsigned integer indicates the BGP Identifier of the sender.  A given BGP speaker sets the value of its BGP
+         Identifier to an IP address that is assigned to that BGP speaker.  The value of the BGP Identifier is determined upon
+         startup and is the same for every local interface and BGP peer. --> 
+	<!--<BGPIdentifier>192.168.1.200</BGPIdentifier> -->
+	<!-- If the peer is in charge of sending its topology (only the interdomain Links) to the other BGP peer it is connected to. Default: false -->
+	<sendTopology>false</sendTopology>
+	<!-- If the peer is in charge of sending its whole topology to the other BGP peer it is connected to. Default: false -->
+	<sendIntradomainLinks>true</sendIntradomainLinks>
+	<!-- Optional Parameter. How to learn the topology. Possibilities: fromXML, fromBGP. Default: fromBGP -->
+	<learnTopology>fromBGP</learnTopology>	
+	<!-- Topology network to read. It is mandatory if and only if learnTopology parameter is fromXML. -->
+	<!--<topologyFile>src/test/resources/network1.xml</topologyFile>-->
+	<!-- Optional Parameter. Instance Identifier for node and link NLRI. See rfc 6549. Default value: 0-->
+	<!--<instanceID>0</instanceID>-->
+	<!-- Optional Parameter. Default value: localhost -->
+	<localBGPAddress>0.0.0.0</localBGPAddress>
+</config>
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/resources/TMConfiguration_guillermo.xml b/src/bgpls_speaker/service/resources/TMConfiguration_guillermo.xml
new file mode 100644
index 0000000000000000000000000000000000000000..20d85402d88c46ad0a21d2eb708f920da5c09b87
--- /dev/null
+++ b/src/bgpls_speaker/service/resources/TMConfiguration_guillermo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<config>
+    <managementIP>localhost</managementIP>
+    <managementPort>5007</managementPort>
+
+    <XML>
+        <mode>IP</mode>
+	    <XMLFileTopology>network1.xml</XMLFileTopology>
+    </XML>
+     <InitFrom>
+        <Init>initFromXML</Init>
+    </InitFrom>
+    <WSOld>
+	<ip>localhost</ip>
+	<port>9876</port>
+    </WSOld>
+    <COP>
+        <serve_port>8087</serve_port>
+    </COP>
+    <BGPLS>
+        <Reader>True</Reader>
+        <Writer>True</Writer>
+        <BGPLSconfigFile>BGP4Parameters_3.xml</BGPLSconfigFile>
+    </BGPLS>
+   
+</config>
diff --git a/src/bgpls_speaker/service/resources/bgp_ls.jar b/src/bgpls_speaker/service/resources/bgp_ls.jar
new file mode 100644
index 0000000000000000000000000000000000000000..06000cf98a1b592e80fd96eee059d4200301f352
Binary files /dev/null and b/src/bgpls_speaker/service/resources/bgp_ls.jar differ
diff --git a/src/bgpls_speaker/service/resources/interfaces.json b/src/bgpls_speaker/service/resources/interfaces.json
new file mode 100644
index 0000000000000000000000000000000000000000..3cd5e0840037a84f42fd258f2b2cd6cffd5515d7
--- /dev/null
+++ b/src/bgpls_speaker/service/resources/interfaces.json
@@ -0,0 +1,49 @@
+{
+    "devices":[
+        {"name":"HL2-1-1","IP-Addr":"10.95.90.76",
+        "interfaces":{
+        "99.1.2.1":"GigabitEthernet0/0/0/0",
+        "99.1.3.1":"GigabitEthernet0/0/0/1",
+        "99.1.11.1":"GigabitEthernet0/0/0/2"},
+        "links":{
+            "GigabitEthernet0/0/0/0":"HL2-2-1",
+            "GigabitEthernet0/0/0/1":"HL2-3-1",
+            "GigabitEthernet0/0/0/2":"HL2-1-2"
+        }
+    },
+        {"name":"HL2-3-1","IP-Addr":"10.95.90.78",
+        "interfaces":{
+            "99.3.4.3":"GigabitEthernet0/0/0/0",
+            "99.1.3.3":"GigabitEthernet0/0/0/1",
+            "99.3.33.3":"GigabitEthernet0/0/0/2",
+            "99.3.52.2":"GigabitEthernet0/0/0/3"},
+            "links":{
+                "GigabitEthernet0/0/0/0":"HL2-4-1",
+                "GigabitEthernet0/0/0/1":"HL2-1-1",
+                "GigabitEthernet0/0/0/2":"HL2-3-2",
+                "GigabitEthernet0/0/0/3":"HL3-1-1"
+        }},
+        {"name":"HL2-2-2","IP-Addr":"10.95.90.81",
+        "interfaces":{
+            "99.1.12.2":"ge-0/0/0",
+            "99.2.24.2":"ge-0/0/1",
+            "99.2.22.22":"ge-0/0/2",
+            "99.2.99.2":"ge-0/0/9"},
+            "links":{
+                "ge-0/0/0":"HL2-1-2",
+                "ge-0/0/1":"HL2-4-2",
+                "ge-0/0/2":"HL2-2-1",
+                "ge-0/0/9":"HL2-4-2_OSPF"
+                }},
+        {"name":"HL2-2-1","IP-Addr":"10.95.90.77",
+        "interfaces":{
+            "99.1.2.2":"Gi0/0/0/0",
+            "99.2.4.2":"Gi0/0/0/1",
+            "99.2.22.2":"Gi0/0/0/2"},
+            "links":{
+                "Gi0/0/0/0":"HL2-1-1",
+                "Gi0/0/0/1":"HL2-4-1",
+                "Gi0/0/0/2":"HL2-2-2"
+                }}
+    ]
+}
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/tools/DiscoveredDBManager.py b/src/bgpls_speaker/service/tools/DiscoveredDBManager.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3238bf464d9041f3512d0a4165867b66896edc8
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/DiscoveredDBManager.py
@@ -0,0 +1,239 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List
+from bgpls_speaker.service.tools.Tools import UpdateRequest,NodeInfo,LinkInfo
+from common.proto.bgpls_pb2 import NodeDescriptors
+from common.proto.context_pb2 import ContextId, ContextList,Topology,TopologyId,Device,DeviceDriverEnum,ContextId,Empty, TopologyList
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+
+import logging
+LOGGER = logging.getLogger(__name__)
+
+class DiscoveredDBManager:
+    def __init__(self):
+        self.discoveredDB=[]
+        # TODO: add a topology DB
+
+    def AddToDB(self,update_request : UpdateRequest):
+        """
+        Adds a BGP UPDATE message to the discoveredDB. Checks whether the node already exists in the discoveredDB.
+        TODO: check if the node exists in the context
+        """
+        # TODO: with self.lock
+        # Check if node info message
+        if(self.checkIfNodeInUpdate(update_request)):
+            # Check if node exists
+            to_add=True
+            for node in update_request.nodes:
+                if(self.CheckIfNodeNameInDb(node) or CheckIfNodeInContext(node.node_name)):
+                    # Replace info from node if exists
+                    LOGGER.debug("(AddToDB) Node already in DB!!!")
+                    to_add=False
+                else:
+                    LOGGER.debug("(AddToDB) Node NOT in DB!!!")
+            if(to_add):
+                self.discoveredDB.append(update_request)
+        else:
+            # is a link
+            # Compare and update
+            self.discoveredDB.append(update_request)
+        return True
+    
+    def GetDiscoveredDB(self):
+        return self.discoveredDB
+    
+    def checkIfNodeInUpdate(self,update_request : UpdateRequest):
+        """
+        Returns true if the update message contains node info.
+        """
+        if(update_request.nodes):
+            return True
+        return False
+    
+    
+    def CheckIfNodeNameInDb(self,new_node : NodeInfo) -> bool:
+        """
+        Returns true if the new node is already in the discovered database.
+        """
+        for update in self.discoveredDB:
+            for node in update.nodes:
+                if(node.igp_id==new_node.igp_id):
+                    return True
+        return False
+    
+    def GetNodeNamesFromDiscoveredDB(self):
+        """
+        Return a list of node_names from the current discovered devices
+        saved in the discoveredDB
+        """
+        node_list =[update_request.nodes for update_request in self.discoveredDB if update_request.nodes]
+        # LOGGER.info("nodes (GetNodeNamesFromDiscoveredDB) %s",node_list )
+        # Inside an update there is a list of nodes; TODO: possible fix:
+        node_info= [node for nodes in node_list for node in nodes]
+        return [node.node_name for node in node_info]
+    
+    def GetNodesFromDiscoveredDB(self):
+        """
+        Returns a list of nodes of class type tools.NodeInfo for the currently discovered devices
+        saved in the discoveredDB. Skips the ones already added to the context.
+        """
+
+        node_list =[update_request.nodes for update_request in self.discoveredDB if update_request.nodes]
+        return [node for nodes in node_list for node in nodes if (not CheckIfNodeInContext(node.node_name))]
+    
+    def GetLinksFromDiscoveredDB(self):
+        """
+        Return a list of links of class type: tools.LinkInfo from the current discovered links
+        saved in the discoveredDB
+        """
+        link_list= [update_request.links for update_request in self.discoveredDB if update_request.links]
+        return [link for links in link_list for link in links]
+    
+    def UpdateDiscoveredDBWithContext(self):
+        """
+        Check if device discovered by bgpls is already in the topology.
+        """
+        # device_names,device_ips=AddContextDevices(context_client)
+        return True
+    
+    def GetNodeNameFromLinkId(self,link_igpid):
+        """
+        Returns the node name for the given IGP id, if it exists in the discoveredDB.
+        """
+        for update in self.discoveredDB:
+            for node in update.nodes:
+                if(node.igp_id==link_igpid):
+                    return node.node_name
+        return None
+    
+    def UpdateNodeNameInLink(self):
+        """
+        Checks whether each IGP id has a node name assigned in the discoveredDB and,
+        if so, assigns it to the NodeDescriptors name.
+        """
+        for update in self.discoveredDB:
+            for link in update.links:
+                if(self.GetNodeNameFromLinkId(link.local.bgpls_id) is not None):
+                    LOGGER.info("(UpdateNodeNameInLink) local %s:  %s",link.local.bgpls_id, self.GetNodeNameFromLinkId(link.local.bgpls_id))
+                    link.local.node_name=self.GetNodeNameFromLinkId(link.local.bgpls_id)
+                else:
+                    link.local.node_name=link.local.bgpls_id
+                if(self.GetNodeNameFromLinkId(link.remote.bgpls_id) is not None):
+                    LOGGER.info("(UpdateNodeNameInLink) remote %s:  %s",link.remote.bgpls_id, self.GetNodeNameFromLinkId(link.remote.bgpls_id))
+                    link.remote.node_name=self.GetNodeNameFromLinkId(link.remote.bgpls_id)
+                else:
+                    link.remote.node_name=link.remote.bgpls_id
+        return True
+
+    def RemoveLinkFromDB(self):
+        """
+        Removes a link from the DB if it matches the source and the destination.
+        """
+        return True
+    
+    def FindConnectedNodes(self,new_node):
+        """
+        Returns a list of nodes connected to the given node, using the discovered
+        link list. Returns None if there are no connections.
+        """
+        # find the links in which the node appears
+        links_to_node=[]
+        nodes_conected=[]
+        for update in self.discoveredDB:
+            for link in update.links:
+                if(link.local.node_name == new_node):
+                    links_to_node.append(link)
+                    nodes_conected.append(link.remote.node_name)
+                if(link.remote.node_name == new_node):
+                    links_to_node.append(link)
+                    nodes_conected.append(link.local.node_name)
+        
+        if(nodes_conected):
+            LOGGER.debug("(FindConnectedNodes) links to local node:%s",new_node)
+            LOGGER.debug("(FindConnectedNodes) %s", nodes_conected)
+            return nodes_conected
+        LOGGER.debug("(FindConnectedNodes) NO LINKS TO OTHER NODES")
+        return None
+
+    def DeleteNodeFromDiscoveredDB(self, node_name) -> bool:
+        """
+        Deletes a node from the DiscoveredDB given its node name. TODO: use the IGP id?
+        """
+        LOGGER.info("(DeleteNodeFromDiscoveredDB)")
+        
+        # Rebuild the list instead of deleting while iterating, which would skip entries
+        self.discoveredDB = [
+            update for update in self.discoveredDB
+            if not any(node.node_name == node_name for node in update.nodes)
+        ]
+        return True
+
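+# Minimal usage sketch (assumes an update_request already built from a received
+# proto message, as done in GrpcServer.update; for reference only):
+#
+#     db = DiscoveredDBManager()
+#     db.AddToDB(update_request)
+#     db.UpdateNodeNameInLink()
+#     names = db.GetNodeNamesFromDiscoveredDB()
+#     links = db.GetLinksFromDiscoveredDB()
+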
+def AddContextDevicesFull(context_client : ContextClient):
+    """
+    Debug purposes only: walks all contexts and topologies and returns the
+    device names and IPs found in the context.
+    """
+    LOGGER.info("(AddContextDevicesFull)")
+    contexts : ContextList = context_client.ListContexts(Empty())
+    for context_ in contexts.contexts:
+        context_uuid : str = context_.context_id.context_uuid.uuid
+        context_name : str = context_.name
+        topologies : TopologyList = context_client.ListTopologies(context_.context_id)
+    # topologies : TopologyList=context_client.ListTopologies(context_client)
+        for topology_ in topologies.topologies:
+            #topology_uuid : str = topology_.topology_id.topology_uuid.uuid
+            topology_name : str = topology_.name
+            context_topology_name  = 'Context({:s}):Topology({:s})'.format(context_name, topology_name)
+            # Topos=context.GetTopology(list_topo.topology_id)
+            LOGGER.debug("topo (AddContextDevices) %s",topology_)
+            # details=context_client.GetTopologyDetails(topology_.topology_id)
+            # LOGGER.info("details (AddContextDevices) %s",details)
+            devices=context_client.ListDevices(Empty())
+            # LOGGER.info("devices (driverSettings) %s",devices)
+            device_names=[]
+            device_ips=[]
+            for device_ in devices.devices:
+                LOGGER.info("device_ (AddContextDevices) %s",device_.name)
+                device_names.append(device_.name)
+                for config_rule_ in device_.device_config.config_rules:
+                    if config_rule_.custom.resource_key == "_connect/address":
+                        LOGGER.info("device_.resource_value-addr (driverSettings) %s",
+                        config_rule_.custom.resource_value)
+                        device_ips=config_rule_.custom.resource_value
+                        
+    return device_names,device_ips
+
+
+
+def GetContextDevices(context_client : ContextClient):
+    """
+    Returns the device names and their corresponding device IPs existing in the context.
+    """
+    LOGGER.info("(GetContextDevices)")
+    devices=context_client.ListDevices(Empty())
+    device_names=[]
+    device_ips=[]
+    for device_ in devices.devices:
+        LOGGER.debug("device_ (AddContextDevices) %s",device_.name)
+        device_names.append(device_.name)
+        for config_rule_ in device_.device_config.config_rules:
+            if config_rule_.custom.resource_key == "_connect/address":
+                LOGGER.info("device_.resource_value-addr (driverSettings) %s",
+                config_rule_.custom.resource_value)
+                device_ips=config_rule_.custom.resource_value
+                    
+    return device_names,device_ips
+
+def CheckIfNodeInContext(node_name) -> bool:
+    """
+    Returns true if the node exists in the context.
+    """
+    context_client=ContextClient()
+    context_client.connect()
+    device_names,device_ips=GetContextDevices(context_client)
+    LOGGER.info("(CheckIfNodeInContext) device_names: %s nodena %s",device_names,node_name)
+    for node in device_names:
+        if(node==node_name):
+            LOGGER.info("(CheckIfNodeInContext) Node already in context")
+            return True
+    LOGGER.info("(CheckIfNodeInContext) Node NOT in context")
+    return False
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/tools/GrpcServer.py b/src/bgpls_speaker/service/tools/GrpcServer.py
new file mode 100644
index 0000000000000000000000000000000000000000..166b7351732263e11bce44db3aa21792681ea9ce
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/GrpcServer.py
@@ -0,0 +1,201 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging, queue, threading, time
+from concurrent import futures
+
+from bgpls_speaker.service.tools.DiscoveredDBManager import DiscoveredDBManager
+from .protos import grpcService_pb2_grpc
+from .protos import grpcService_pb2
+from .Tools import UpdateRequest
+
+from .JavaRunner import JavaRunner
+
+LOGGER = logging.getLogger(__name__)
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+SERVER_ADDRESS = 'localhost:2021'
+
+class GrpcServer():
+
+    """
+    This class gets the current topology from a BGP-LS speaker module written in Java
+    and stores the possible new devices to be added to the context topology.
+    It needs the address, port and AS number of the device that will provide the
+    information via BGP-LS to the Java module.
+    """
+    def __init__(self,DiscoveredDB : DiscoveredDBManager) -> None: # pylint: disable=super-init-not-called
+        self.__lock = threading.Lock()
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        self.__out_samples = queue.Queue()
+        self.__server=None # the actual gRPC server is created in ConnectThread
+        # self.__address="10.95.86.214"
+        # self.__port=179
+        # self.__asNumber=65006
+        # self.__configFile="TMConfiguration_guillermo.xml"
+        # self.__process=0
+        self.__javaLocalPort=0 # --> BGP4Port in XML file
+        self.__mngPort=0 # Port used in XML config file for management (NOT used in TFS)
+        self.__runnerList=[]
+        # Database for saving all newly discovered devices
+        self.__discoveredDB=DiscoveredDB
+        # self.__comms=grpcComms
+        # TODO: this should hold the info of the runner it connects to
+    
+    def ConnectThread(self) -> bool:
+        # TODO: methods required to connect to the speaker
+        # If started, assume it is already connected
+        if self.__started.is_set(): return True
+        self.__started.set() #notifyAll -->event.is_set()
+        # 10 workers ?
+        self.__server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+        grpcService_pb2_grpc.add_updateServiceServicer_to_server(self, self.__server)
+        self.__server.add_insecure_port(SERVER_ADDRESS)
+        # server.add_secure_port(SERVER_ADDRESS)
+        LOGGER.info("Starting server on %s", SERVER_ADDRESS)
+        self.__server.start()
+        try:
+            while True:
+                time.sleep(_ONE_DAY_IN_SECONDS)
+        except KeyboardInterrupt:
+            LOGGER.info("DISCONNECT")
+            self.Disconnect()
+        return True
+
+    def Connect(self):
+        grpcThread = threading.Thread(target=self.ConnectThread)
+        grpcThread.start()
+        return True
+
+    def Disconnect(self) -> bool:
+        self.__terminate.set()
+        # If not started, assume it is already disconnected
+        if not self.__started.is_set(): return True
+        LOGGER.info("Keyboard interrupt, stop server")
+        self.__server.stop(0)
+        # Disconnect triggers deactivation of sampling events
+        # self.__scheduler.shutdown()
+        # exit(0)
+        return True
+
+    def update(self, request, context):
+        """
+        Processes the update messages received by the gRPC server and stores them
+        in the discoveredDB; replies with an ack.
+        """
+        with self.__lock:
+            #TODO: Get update
+            LOGGER.info("(server) Update message from bgpls speaker: \n %s" % (request))
+            response = grpcService_pb2.updateResponse(ack="OK")
+            update_request = UpdateRequest.from_proto(request)
+            self.__discoveredDB.AddToDB(update_request)
+            # LOGGER.debug("Update class string %s",update_request.toString())
+            return response
+    
+
+
+    def connectToJavaBgpls(self, address : str = "10.95.86.214", port : str = "179", asNumber : str = "65006"):
+
+        # Get unused* port
+        self.setLocalPort()
+        runner = JavaRunner(self.__javaLocalPort,address,self.__mngPort)
+        # Sets port in XML config file for java program
+        runner.setAsNumber(asNumber)
+        runner.setPort(port)
+        runner.setPeer()
+        process=runner.execBGPLSpeaker()
+        self.__runnerList.append(runner)
+
+        return process.pid
+
+    def terminateRunners(self):
+        for runner in self.__runnerList:
+            runner.endBGPSpeaker()
+        return True
+    
+    def terminateGrpcServer(self):
+        LOGGER.debug("Terminating java programs...")
+        self.terminateRunners()
+        LOGGER.debug("Disconnecting grpc server...")
+        self.Disconnect() 
+        return True
+
+    def terminateRunnerById(self,speaker_id):
+        """
+        Disconnects from a BGP-LS speaker given its speaker id, which is the same
+        as the PID of the running Java process.
+        """
+        for runner in self.__runnerList:
+            if(runner.getPid()==speaker_id):
+                runner.endBGPSpeaker()
+                self.__runnerList.remove(runner)
+
+        return True
+        
+    def setLocalPort(self,initPort=12179):
+        """
+        If a Java speaker is already running, uses the last used port plus one;
+        otherwise initializes the port.
+        initPort --> BGP4Port, usually 179 (the standard BGP port)
+        """
+        with self.__lock:
+            if(self.__runnerList):
+                LOGGER.debug("Port exists %s",self.__javaLocalPort)
+                lastRunner=self.__runnerList[-1]
+                self.__javaLocalPort=lastRunner.getCurrentLocalPort()+1
+                self.__mngPort=lastRunner.getCurrentMngPort()+1
+            else:
+                LOGGER.debug("Port DONT exists %s",self.__javaLocalPort)
+                self.__javaLocalPort=initPort
+                self.__mngPort=1112 # default management port
+            return self.__javaLocalPort
+        
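+    # Example port allocation (for reference): the first speaker gets
+    # BGP4Port=12179 and management port 1112; each additional concurrent
+    # speaker gets the previous ports plus one (12180/1113, 12181/1114, ...).
+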
+    def getSpeakerListIds(self):
+        return [runner.getPid() for runner in self.__runnerList]
+
+    def getSpeakerFromId(self,speaker_id):
+        """
+        Returns address,as_number,peer_port
+        """
+        for runner in self.__runnerList:
+            if(runner.getPid()==speaker_id):
+                return runner.getRunnerInfo()
+        return None
+    
+    def getSpeakerIdFromIpAddr(self,addr):
+        """
+        Returns the id of the speaker with the given IP address.
+        """
+        for runner in self.__runnerList:
+            ip_addr,asN,port=runner.getRunnerInfo()
+            if(ip_addr==addr):
+                return runner.getPid()
+        return None
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/tools/JavaRunner.py b/src/bgpls_speaker/service/tools/JavaRunner.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f777e77a660e10bda9d10df3c2ae8dc36945ddd
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/JavaRunner.py
@@ -0,0 +1,141 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os, subprocess, threading, time
+from lxml import etree
+
+
+SERVER_ADDRESS = 'localhost:2021'
+SERVER_ID = 1
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+XML_FILE="/var/teraflow/bgpls_speaker/service/resources/BGP4Parameters_3.xml"
+XML_CONFIG_FILE="TMConfiguration_guillermo.xml"
+
+LOGGER = logging.getLogger(__name__)
+
+
+class JavaRunner:
+
+    def __init__(self,localPort : int, address : str ="10.95.86.214", mngPort : int = 1112):
+
+        self.__peerPort=179
+        self.__localPort=localPort 
+        self.__managementPort=mngPort
+        # To support multiple speakers at the same time,
+        # add 1 to the port and pass it to the subprocess call
+        self.__configFile=XML_CONFIG_FILE
+        self.__process=0
+        self.__lock = threading.Lock()
+        self.__address = address
+        self.__portConf=5007
+        self.__asNumber=65006
+
+    def getCurrentLocalPort(self):
+        with self.__lock:
+            return self.__localPort
+
+    def getCurrentMngPort(self):
+        with self.__lock:
+            return self.__managementPort
+
+    def getPid(self):
+        return self.__process.pid
+
+    def execAndKill(self):
+
+        LOGGER.debug("Before exec and kill")
+        os.chdir("/var/teraflow/bgpls_speaker/service/resources/")
+        cwd = os.getcwd()
+        LOGGER.info("Current working directory: %s", cwd)
+        # Security shell=False
+        self.__process=subprocess.Popen(['java', '-jar', 'bgp_ls.jar', XML_CONFIG_FILE],
+                shell=False,start_new_session=True,stdout=subprocess.PIPE)
+        LOGGER.debug("Time to sleep")
+        java_pid = self.__process.pid
+        print("Java PID:", java_pid)    
+        time.sleep(15)
+        self.__process.terminate()
+
+
+    def execBGPLSpeaker(self) -> subprocess.Popen:
+            """
+            Executes the Java BGP-LS speaker in a non-blocking process.
+            """
+            # TODO: many things to check here
+            LOGGER.debug("Before exec")
+            os.chdir("/var/teraflow/bgpls_speaker/service/resources/")
+            # Security reasons shell=False
+            self.__process=subprocess.Popen(['java' , '-jar' , 'bgp_ls.jar' , XML_CONFIG_FILE],
+                shell=False,start_new_session=True)
+            return self.__process
+
+    def setPort(self,port):
+         self.__peerPort=port
+         return True
+    def setAsNumber(self,asNumber):
+         self.__asNumber=asNumber
+         return True
+
+    def setPeer(self) -> bool:
+            """
+            Updates the existing XML config file with the peer address, ports and AS number.
+            """
+
+            XMLParser = etree.XMLParser(remove_blank_text=False)
+            tree = etree.parse(XML_FILE, parser=XMLParser)
+            root = tree.getroot()
+            peerAddress = root.find(".//peer")
+            peerAddress.text=self.__address
+            peerPort = root.find(".//peerPort")
+            peerPort.text=str(self.__peerPort)
+            localPort = root.find(".//BGP4Port")
+            localPort.text=str(self.__localPort)
+            myAutonomousSystem = root.find(".//myAutonomousSystem")
+            myAutonomousSystem.text=str(self.__asNumber)
+            managePort = root.find(".//BGP4ManagementPort")
+            managePort.text=str(self.__managementPort)
+            tree.write(XML_FILE) # TODO: rewrite using a with-statement
+
+            # XMLParser = etree.XMLParser(remove_blank_text=False)
+            # tree = etree.parse("TMConfiguration_guillermo.xml", parser=XMLParser)
+            # root = tree.getroot()
+            # portConf = root.find(".//managementPort")
+            # portConf.text=str(self.__portConf)
+            # tree.write("TMConfiguration_guillermo.xml")
+            return True
+
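+    # Sketch of the resulting XML changes (illustrative values, matching the
+    # defaults in BGP4Parameters_3.xml): setPeer() on a runner created with
+    # address "10.95.86.214", peer port 179, local port 12179, management port
+    # 1112 and AS 65006 rewrites:
+    #     <peer>10.95.86.214</peer> <peerPort>179</peerPort>
+    #     <BGP4Port>12179</BGP4Port> <BGP4ManagementPort>1112</BGP4ManagementPort>
+    #     <myAutonomousSystem>65006</myAutonomousSystem>
+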
+    def endBGPSpeaker(self) -> bool:
+            """
+            Kills the Java program running the BGP-LS speaker with a SIGKILL signal.
+            """
+            LOGGER.debug("sending kill signal to process %s",self.__process.pid)
+            # time.sleep(15)
+            LOGGER.debug("PID: %d",self.__process.pid)
+            # LOGGER.debug("Group PID: %d",os.getpgid(self.__process.pid))
+            # os.killpg(os.getpgid(self.__process.pid), signal.SIGKILL)
+            self.__process.kill()
+            # .terminate() for SIGTERM
+            return True
+    
+    def getRunnerInfo(self):
+         return self.__address,self.__asNumber,self.__peerPort
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/tools/Tools.py b/src/bgpls_speaker/service/tools/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fb724aacf9e36bdd28b875a3a6d89b4053dd585
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/Tools.py
@@ -0,0 +1,180 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .protos import grpcService_pb2_grpc
+from .protos import grpcService_pb2
+
+import logging
+LOGGER = logging.getLogger(__name__)
+
+# AUTOGENERATED
+class UpdateRequest:
+    def __init__(self, address_family_id, next_hop, as_path_segment, nodes, links):
+        self.address_family_id = address_family_id
+        self.next_hop = next_hop
+        self.as_path_segment = as_path_segment
+        self.nodes = nodes
+        self.links = links
+    
+    @classmethod
+    def from_proto(cls, update_request):
+        nodes = []
+        for node in update_request.node:
+            nodes.append(NodeInfo.from_proto(node))
+
+        links = []
+        for link in update_request.link:
+            links.append(LinkInfo.from_proto(link))
+
+        return cls(
+            address_family_id=update_request.addressFamilyID,
+            next_hop=update_request.nextHop,
+            as_path_segment=update_request.asPathSegment,
+            nodes=nodes,
+            links=links
+        )
+    def toString(self):
+      # Debug purposes
+      out = ""
+      out+=self.address_family_id
+      out+=self.next_hop
+      out+=self.as_path_segment
+
+      for node in self.nodes:
+          out+=node.node_name
+          out+=node.igp_id
+          out+=str(node.bgpls_id)
+          out+=str(node.as_id)
+      for link in self.links:
+          out+=link.remote_id
+          out+=link.local_id
+          out+=link.remote_ipv4_id
+          out+=link.local_ipv4_id
+          out+=str(link.local.as_number)
+          out+=link.local.bgpls_id
+          out+=link.remote.as_number
+          out+=link.remote.bgpls_id
+          out+=str(link.available_bw)
+          out+=str(link.residual_bw)
+          out+=str(link.utilized)
+          out+=str(link.max_link_delay)
+          out+=str(link.min_link_delay)
+          out+=str(link.delay_variation)
+          out+=str(link.delay)
+          out+=str(link.te_default_metric)
+          out+=str(link.adjacency_sid)
+      return out
+
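+# Conversion sketch (this mirrors how GrpcServer.update consumes a received
+# proto message; for reference only):
+#
+#     update_request = UpdateRequest.from_proto(request)
+#     for node in update_request.nodes: print(node.node_name, node.igp_id)
+#     for link in update_request.links: print(link.local.bgpls_id, link.remote.bgpls_id)
+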
+class NodeInfo:
+    def __init__(self, node_name, igp_id, bgpls_id, as_id,learnt_from):
+        self.node_name = node_name
+        self.igp_id = igp_id
+        self.bgpls_id = bgpls_id
+        self.as_id = as_id
+        self.learnt_from=learnt_from
+    
+    @classmethod
+    def from_proto(cls, proto_node):
+        return cls(
+            node_name=proto_node.nodeName,
+            igp_id=proto_node.igpID,
+            bgpls_id=proto_node.bgplsID,
+            as_id=proto_node.asID,
+            learnt_from=proto_node.learntFrom
+        )
+
+class LinkInfo:
+    def __init__(self, remote_id, local_id, remote_ipv4_id, local_ipv4_id, local, remote, available_bw, residual_bw, utilized, max_link_delay, min_link_delay, delay_variation, delay, te_default_metric, adjacency_sid,learnt_from):
+        self.remote_id = remote_id
+        self.local_id = local_id
+        self.remote_ipv4_id = remote_ipv4_id
+        self.local_ipv4_id = local_ipv4_id
+        self.local = local
+        self.remote = remote
+        self.available_bw = available_bw
+        self.residual_bw = residual_bw
+        self.utilized = utilized
+        self.max_link_delay = max_link_delay
+        self.min_link_delay = min_link_delay
+        self.delay_variation = delay_variation
+        self.delay = delay
+        self.te_default_metric = te_default_metric
+        self.adjacency_sid = adjacency_sid
+        self.learnt_from=learnt_from
+    
+    @classmethod
+    def from_proto(cls, proto_link):
+        return cls(
+            remote_id=proto_link.remoteID,
+            local_id=proto_link.localID,
+            remote_ipv4_id=proto_link.remoteIPv4ID,
+            local_ipv4_id=proto_link.localIPv4ID,
+            local=NodeDescriptors.from_proto(proto_link.local),
+            remote=NodeDescriptors.from_proto(proto_link.remote),
+            available_bw=proto_link.availableBw,
+            residual_bw=proto_link.residualBw,
+            utilized=proto_link.utilized,
+            max_link_delay=proto_link.maxLinkDelay,
+            min_link_delay=proto_link.minLinkDelay,
+            delay_variation=proto_link.delayVariation,
+            delay=proto_link.delay,
+            te_default_metric=proto_link.TEDefaultMetric,
+            adjacency_sid=proto_link.adjacencySid,
+            learnt_from=proto_link.learntFrom
+        )
+
+class NodeDescriptors:
+    def __init__(self, as_number, bgpls_id):
+        self.as_number = as_number
+        self.bgpls_id = bgpls_id
+        self.node_name=None
+    @classmethod
+    def from_proto(cls, descriptor):
+        return cls(
+            as_number=descriptor.asNumber,
+            bgpls_id=descriptor.bgplsID
+        )
+
+
+
+"""
+message Device {
+  DeviceId device_id = 1;
+  string name = 2;
+  string device_type = 3;
+  DeviceConfig device_config = 4;
+  DeviceOperationalStatusEnum device_operational_status = 5;
+  repeated DeviceDriverEnum device_drivers = 6;
+  repeated EndPoint device_endpoints = 7;
+  repeated Component component = 8; // Used for inventory
+}
+message DeviceId {
+  Uuid device_uuid = 1;
+}
+message TopologyId {
+  ContextId context_id = 1;
+  Uuid topology_uuid = 2;
+}
+
+message Topology {
+  TopologyId topology_id = 1;
+  string name = 2;
+  repeated DeviceId device_ids = 3;
+  repeated LinkId link_ids = 4;
+}
+
+message TopologyDetails {
+  TopologyId topology_id = 1;
+  string name = 2;
+  repeated Device devices = 3;
+  repeated Link links = 4;
+}
+message LinkId {
+  Uuid link_uuid = 1;
+}
+
+message Link {
+  LinkId link_id = 1;
+  string name = 2;
+  repeated EndPointId link_endpoint_ids = 3;
+}
+"""
\ No newline at end of file
diff --git a/src/bgpls_speaker/service/tools/__init__.py b/src/bgpls_speaker/service/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/bgpls_speaker/service/tools/json_loader.py b/src/bgpls_speaker/service/tools/json_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..a63881866b7fd790a892bb683296940e4641bed2
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/json_loader.py
@@ -0,0 +1,28 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+def getInterfaceFromJson():
+    # Note: the path is relative to the working directory (JavaRunner chdirs to
+    # the resources folder, where interfaces.json lives)
+    with open('interfaces.json', 'r', encoding='utf-8') as json_file:
+        interface_data = json.load(json_file)
+    return interface_data
+
+def getInterfaceFromNodeNames(json_file,node_name_src,node_name_dst):
+
+    interface_src=[]
+    interface_dst=[]
+    for device in json_file['devices']:
+        print("dev: %s",device.keys())
+        if device['name'] == node_name_src:
+            interface_src=list(device['links'].keys())[list(device['links'].values()).index(node_name_dst)]
+        if device['name'] == node_name_dst:
+            interface_dst=list(device['links'].keys())[list(device['links'].values()).index(node_name_src)]
+
+    return interface_src,interface_dst
+    
+
+if __name__ == "__main__":
+    data=getInterfaceFromJson()
+    print("data: %s",data['devices'])
+    # for device in data['devices']:
+    #     print(device['interfaces'].keys())
+
+    print(getInterfaceFromNodeNames(data,"HL2-2-1","HL2-2-2"))
\ No newline at end of file
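
Note: getInterfaceFromNodeNames assumes each entry in interfaces.json maps a device name to a 'links' dictionary of local interface name to remote node name. A minimal example of the expected file, inferred from the lookup code (device and interface names are illustrative only):

    {
      "devices": [
        {"name": "HL2-2-1", "links": {"eth0": "HL2-2-2"}},
        {"name": "HL2-2-2", "links": {"eth1": "HL2-2-1"}}
      ]
    }

With this file, getInterfaceFromNodeNames(data, 'HL2-2-1', 'HL2-2-2') returns ('eth0', 'eth1').
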
diff --git a/src/bgpls_speaker/service/tools/protos/grpcService.proto b/src/bgpls_speaker/service/tools/protos/grpcService.proto
new file mode 100644
index 0000000000000000000000000000000000000000..c8e01035b14cdd2d3f4089d82ddb512907279ac0
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/protos/grpcService.proto
@@ -0,0 +1,67 @@
+syntax = "proto3";
+package src.main.proto;
+
+// The Java module opens the communication:
+// the client (Java) sends the information to the server (Python),
+// and the Python module replies with an ack.
+
+message updateRequest {
+
+	string addressFamilyID = 1;
+	string nextHop = 2;
+	string asPathSegment = 3;
+
+	// repeated: zero or more entries may be sent
+	repeated nodeInfo node = 4;
+	repeated linkInfo link = 5;
+}
+
+message nodeInfo{
+	string nodeName=1;
+	string igpID=2;
+	string bgplsID=3;
+	int32 asID=4;
+	string learntFrom = 5;
+}
+
+message linkInfo{
+
+	string remoteID=1;
+	string localID=2;
+
+	string remoteIPv4ID=3;
+	string localIPv4ID=4;
+
+	NodeDescriptors local=5;
+	NodeDescriptors remote=6;
+
+	float availableBw=7;
+	float residualBw = 8;
+	float utilized = 9;
+
+	float maxLinkDelay = 10;
+	float minLinkDelay = 11;
+	float delayVariation = 12;
+	float delay = 13;
+
+	int32 TEDefaultMetric = 14;
+	string adjacencySid = 15;
+
+	string learntFrom = 16;
+
+}
+
+message NodeDescriptors{
+	string asNumber=1;
+	string bgplsID=2;
+}
+
+message updateResponse {
+  string ack = 1;
+}
+
+// A service can expose multiple RPC operations; updateService exposes a
+// single unary RPC through which the Java speaker pushes BGP-LS updates.
+service updateService {
+  rpc update(updateRequest) returns (updateResponse);
+}
\ No newline at end of file
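
Note: a hedged server-side sketch (not part of the diff) showing how the generated bindings below can be served from Python, matching the Java-client/Python-server flow described in the proto comments. The listening port and the flat imports (protos directory on sys.path) are assumptions:

    from concurrent import futures
    import grpc
    import grpcService_pb2
    import grpcService_pb2_grpc

    class UpdateServicer(grpcService_pb2_grpc.updateServiceServicer):
        def update(self, request, context):
            # request carries the attributes defined in nodeInfo/linkInfo
            return grpcService_pb2.updateResponse(ack='OK')

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    grpcService_pb2_grpc.add_updateServiceServicer_to_server(UpdateServicer(), server)
    server.add_insecure_port('[::]:2021')  # port is illustrative
    server.start()
    server.wait_for_termination()
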
diff --git a/src/bgpls_speaker/service/tools/protos/grpcService_pb2.py b/src/bgpls_speaker/service/tools/protos/grpcService_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f219859999a97087966886770d4208b8bcb71464
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/protos/grpcService_pb2.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: grpcService.proto
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11grpcService.proto\x12\x0esrc.main.proto\"\xa0\x01\n\rupdateRequest\x12\x17\n\x0f\x61\x64\x64ressFamilyID\x18\x01 \x01(\t\x12\x0f\n\x07nextHop\x18\x02 \x01(\t\x12\x15\n\rasPathSegment\x18\x03 \x01(\t\x12&\n\x04node\x18\x04 \x03(\x0b\x32\x18.src.main.proto.nodeInfo\x12&\n\x04link\x18\x05 \x03(\x0b\x32\x18.src.main.proto.linkInfo\"^\n\x08nodeInfo\x12\x10\n\x08nodeName\x18\x01 \x01(\t\x12\r\n\x05igpID\x18\x02 \x01(\t\x12\x0f\n\x07\x62gplsID\x18\x03 \x01(\t\x12\x0c\n\x04\x61sID\x18\x04 \x01(\x05\x12\x12\n\nlearntFrom\x18\x05 \x01(\t\"\x8a\x03\n\x08linkInfo\x12\x10\n\x08remoteID\x18\x01 \x01(\t\x12\x0f\n\x07localID\x18\x02 \x01(\t\x12\x14\n\x0cremoteIPv4ID\x18\x03 \x01(\t\x12\x13\n\x0blocalIPv4ID\x18\x04 \x01(\t\x12.\n\x05local\x18\x05 \x01(\x0b\x32\x1f.src.main.proto.NodeDescriptors\x12/\n\x06remote\x18\x06 \x01(\x0b\x32\x1f.src.main.proto.NodeDescriptors\x12\x13\n\x0b\x61vailableBw\x18\x07 \x01(\x02\x12\x12\n\nresidualBw\x18\x08 \x01(\x02\x12\x10\n\x08utilized\x18\t \x01(\x02\x12\x14\n\x0cmaxLinkDelay\x18\n \x01(\x02\x12\x14\n\x0cminLinkDelay\x18\x0b \x01(\x02\x12\x16\n\x0e\x64\x65layVariation\x18\x0c \x01(\x02\x12\r\n\x05\x64\x65lay\x18\r \x01(\x02\x12\x17\n\x0fTEDefaultMetric\x18\x0e \x01(\x05\x12\x14\n\x0c\x61\x64jacencySid\x18\x0f \x01(\t\x12\x12\n\nlearntFrom\x18\x10 \x01(\t\"4\n\x0fNodeDescriptors\x12\x10\n\x08\x61sNumber\x18\x01 \x01(\t\x12\x0f\n\x07\x62gplsID\x18\x02 \x01(\t\"\x1d\n\x0eupdateResponse\x12\x0b\n\x03\x61\x63k\x18\x01 \x01(\t2X\n\rupdateService\x12G\n\x06update\x12\x1d.src.main.proto.updateRequest\x1a\x1e.src.main.proto.updateResponseb\x06proto3')
+
+
+
+_UPDATEREQUEST = DESCRIPTOR.message_types_by_name['updateRequest']
+_NODEINFO = DESCRIPTOR.message_types_by_name['nodeInfo']
+_LINKINFO = DESCRIPTOR.message_types_by_name['linkInfo']
+_NODEDESCRIPTORS = DESCRIPTOR.message_types_by_name['NodeDescriptors']
+_UPDATERESPONSE = DESCRIPTOR.message_types_by_name['updateResponse']
+updateRequest = _reflection.GeneratedProtocolMessageType('updateRequest', (_message.Message,), {
+  'DESCRIPTOR' : _UPDATEREQUEST,
+  '__module__' : 'grpcService_pb2'
+  # @@protoc_insertion_point(class_scope:src.main.proto.updateRequest)
+  })
+_sym_db.RegisterMessage(updateRequest)
+
+nodeInfo = _reflection.GeneratedProtocolMessageType('nodeInfo', (_message.Message,), {
+  'DESCRIPTOR' : _NODEINFO,
+  '__module__' : 'grpcService_pb2'
+  # @@protoc_insertion_point(class_scope:src.main.proto.nodeInfo)
+  })
+_sym_db.RegisterMessage(nodeInfo)
+
+linkInfo = _reflection.GeneratedProtocolMessageType('linkInfo', (_message.Message,), {
+  'DESCRIPTOR' : _LINKINFO,
+  '__module__' : 'grpcService_pb2'
+  # @@protoc_insertion_point(class_scope:src.main.proto.linkInfo)
+  })
+_sym_db.RegisterMessage(linkInfo)
+
+NodeDescriptors = _reflection.GeneratedProtocolMessageType('NodeDescriptors', (_message.Message,), {
+  'DESCRIPTOR' : _NODEDESCRIPTORS,
+  '__module__' : 'grpcService_pb2'
+  # @@protoc_insertion_point(class_scope:src.main.proto.NodeDescriptors)
+  })
+_sym_db.RegisterMessage(NodeDescriptors)
+
+updateResponse = _reflection.GeneratedProtocolMessageType('updateResponse', (_message.Message,), {
+  'DESCRIPTOR' : _UPDATERESPONSE,
+  '__module__' : 'grpcService_pb2'
+  # @@protoc_insertion_point(class_scope:src.main.proto.updateResponse)
+  })
+_sym_db.RegisterMessage(updateResponse)
+
+_UPDATESERVICE = DESCRIPTOR.services_by_name['updateService']
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  _UPDATEREQUEST._serialized_start=38
+  _UPDATEREQUEST._serialized_end=198
+  _NODEINFO._serialized_start=200
+  _NODEINFO._serialized_end=294
+  _LINKINFO._serialized_start=297
+  _LINKINFO._serialized_end=691
+  _NODEDESCRIPTORS._serialized_start=693
+  _NODEDESCRIPTORS._serialized_end=745
+  _UPDATERESPONSE._serialized_start=747
+  _UPDATERESPONSE._serialized_end=776
+  _UPDATESERVICE._serialized_start=778
+  _UPDATESERVICE._serialized_end=866
+# @@protoc_insertion_point(module_scope)
diff --git a/src/bgpls_speaker/service/tools/protos/grpcService_pb2_grpc.py b/src/bgpls_speaker/service/tools/protos/grpcService_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8bbda558d60b1108bfcb1ff60fbe755bb2d75c3
--- /dev/null
+++ b/src/bgpls_speaker/service/tools/protos/grpcService_pb2_grpc.py
@@ -0,0 +1,70 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+
+from . import grpcService_pb2 as grpcService__pb2
+
+
+class updateServiceStub(object):
+    """Defining a Service, a Service can have multiple RPC operations
+    """
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.update = channel.unary_unary(
+                '/src.main.proto.updateService/update',
+                request_serializer=grpcService__pb2.updateRequest.SerializeToString,
+                response_deserializer=grpcService__pb2.updateResponse.FromString,
+                )
+
+
+class updateServiceServicer(object):
+    """Defining a Service, a Service can have multiple RPC operations
+    """
+
+    def update(self, request, context):
+        """MODIFY HERE: Update the return to streaming return.
+        """
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_updateServiceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+            'update': grpc.unary_unary_rpc_method_handler(
+                    servicer.update,
+                    request_deserializer=grpcService__pb2.updateRequest.FromString,
+                    response_serializer=grpcService__pb2.updateResponse.SerializeToString,
+            ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+            'src.main.proto.updateService', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class updateService(object):
+    """Defining a Service, a Service can have multiple RPC operations
+    """
+
+    @staticmethod
+    def update(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/src.main.proto.updateService/update',
+            grpcService__pb2.updateRequest.SerializeToString,
+            grpcService__pb2.updateResponse.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
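
Note: the matching client-side call would use the stub defined above; a hedged sketch in which the target address and field values are illustrative:

    import grpc
    import grpcService_pb2
    import grpcService_pb2_grpc

    with grpc.insecure_channel('localhost:2021') as channel:
        stub = grpcService_pb2_grpc.updateServiceStub(channel)
        reply = stub.update(grpcService_pb2.updateRequest(nextHop='10.0.0.1'))
        print('ack: %s' % reply.ack)
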
diff --git a/src/bgpls_speaker/tests/.gitignore b/src/bgpls_speaker/tests/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..6b97d6fe3ad32f39097745229ab7f547f26ecb12
--- /dev/null
+++ b/src/bgpls_speaker/tests/.gitignore
@@ -0,0 +1 @@
+# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc.
diff --git a/src/bgpls_speaker/tests/__init__.py b/src/bgpls_speaker/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549d9811aa5d1c193a44ad45d0d7773236c0612
--- /dev/null
+++ b/src/bgpls_speaker/tests/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+