diff --git a/.gitignore b/.gitignore
index 0a116f850780386a9fe1010b22164f4c7dbf8228..a9144d6699af12319a67e8bad5cec982f3ae6a8c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -168,5 +168,8 @@ delete_local_deployment.sh
 local_docker_deployment.sh
 local_k8s_deployment.sh
 
+# asdf configuration
+.tool-versions
+
 # Other logs
 **/logs/*.log.*
diff --git a/manifests/l3_centralizedattackdetectorservice.yaml b/manifests/l3_centralizedattackdetectorservice.yaml
index 95c6d8176ca86c98c1e26d88267c864247ae8b5b..8a3be69b672200120afb4bca3892dd0c08ec2d65 100644
--- a/manifests/l3_centralizedattackdetectorservice.yaml
+++ b/manifests/l3_centralizedattackdetectorservice.yaml
@@ -42,6 +42,8 @@ spec:
           value: "0.5"
         - name: MONITORED_KPIS_TIME_INTERVAL_AGG
           value: "60"
+        - name: TEST_ML_MODEL
+          value: "0"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:10001"]
diff --git a/manifests/teservice.yaml b/manifests/teservice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..15f1619df08069f00db883f0b918c17837c707d1
--- /dev/null
+++ b/manifests/teservice.yaml
@@ -0,0 +1,84 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: teservice
+spec:
+  selector:
+    matchLabels:
+      app: teservice
+  template:
+    metadata:
+      annotations:
+        config.linkerd.io/skip-inbound-ports: "4189"
+      labels:
+        app: teservice
+    spec:
+      terminationGracePeriodSeconds: 5
+      shareProcessNamespace: true
+      containers:
+      - name: server
+        image: labs.etsi.org:5050/tfs/controller/te:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 10030
+        env:
+        - name: ERLANG_LOGGER_LEVEL
+          value: "debug"
+        - name: ERLANG_COOKIE
+          value: "tfte-unsafe-cookie"
+        - name: ERLANG_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: ERLANG_NODE_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        readinessProbe:
+          exec:
+            command: ["/tfte/bin/tfte", "status"]
+        livenessProbe:
+          exec:
+            command: ["/tfte/bin/tfte", "status"]
+        resources:
+          requests:
+            cpu: 250m
+            memory: 512Mi
+          limits:
+            cpu: 700m
+            memory: 1024Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: teservice
+spec:
+  type: ClusterIP
+  selector:
+    app: teservice
+  ports:
+  - name: grpc
+    protocol: TCP
+    port: 10030
+    targetPort: 10030
+  - name: pcep
+    protocol: TCP
+    port: 4189
+    targetPort: 4189
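+# Post-deployment check (a sketch; the "tfs" namespace is illustrative, adjust to your deployment):
+#   kubectl --namespace tfs get pods --selector app=teservice
+#   kubectl --namespace tfs port-forward service/teservice 10030:10030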
diff --git a/my_deploy.sh b/my_deploy.sh
index 7e8abb1282655dd300d48457f655ac932cbb6e68..888fc98903eb665729d7e0843cf9e9fc8b60741d 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -34,6 +34,9 @@ export TFS_COMPONENTS="context device pathcomp service slice compute webui load_
 # Uncomment to activate L3 CyberSecurity
 #export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
 
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
 
diff --git a/proto/.gitignore b/proto/.gitignore
index d1dea37b3a85abaa18b5bd65d3ec0e1d3c6fe9b6..4d6f9cbd73b283a2520ac926f56294a2aa060478 100644
--- a/proto/.gitignore
+++ b/proto/.gitignore
@@ -3,5 +3,8 @@ src/*/*
 # used to prevent breaking symbolic links from source code folders
 !src/*/.gitignore
 !src/python/__init__.py
+!src/erlang/rebar.config
+!src/erlang/rebar.lock
+!src/erlang/src/tfpb.app.src
 
 uml/generated
diff --git a/proto/context.proto b/proto/context.proto
index 9f779d8db310a98ea05682e619b1357c27c76904..55a80470d40463742cc3e034ca9e933f4ff6c3f0 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -273,6 +273,7 @@ enum ServiceTypeEnum {
   SERVICETYPE_L3NM = 1;
   SERVICETYPE_L2NM = 2;
   SERVICETYPE_TAPI_CONNECTIVITY_SERVICE = 3;
+  SERVICETYPE_TE = 4;
 }
 
 enum ServiceStatusEnum {
diff --git a/proto/generate_code_erlang.sh b/proto/generate_code_erlang.sh
new file mode 100755
index 0000000000000000000000000000000000000000..80fb977e44536482888d2963995fd811e95417fd
--- /dev/null
+++ b/proto/generate_code_erlang.sh
@@ -0,0 +1,79 @@
+#!/bin/bash -eu
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
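+# Usage examples (assumes rebar3 is available; the grpcbox plugin is fetched via rebar.config):
+#   ./generate_code_erlang.sh            # generate Erlang protobuf/gRPC code (default action)
+#   ./generate_code_erlang.sh -f         # force regeneration
+#   ./generate_code_erlang.sh clean      # remove generated artifacts
+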
+set -eu
+
+FORCE=0
+DEFAULT_ACTION="generate"
+
+usage() {
+    echo "Usage: $0 [-f] [clean|generate]" 1>&2
+    echo "Options:"
+    echo "  -f: Force regeneration of all protocol buffers"
+    exit 1;
+}
+
+while getopts "fc" o; do
+    case "${o}" in
+        f)
+            FORCE=1
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+ACTION=${1:-$DEFAULT_ACTION}
+cd "$(dirname "$0")"
+ROOT=$(pwd)
+ERLANG_PROTO_DIR="$ROOT/src/erlang"
+BUILD_CHECK="$ERLANG_PROTO_DIR/.generated"
+
+tfpb_clean() {
+    rm -f "$BUILD_CHECK"
+    rm -rf "$ERLANG_PROTO_DIR/src/"*.erl
+    rm -rf "$ERLANG_PROTO_DIR/src/erlang/_build"
+}
+
+tfpb_generate() {
+    if [[ -f "$BUILD_CHECK" && $FORCE != 1 ]]; then
+        echo "Protocol buffer code for Erlang already generated, use -f to force"
+        exit 0
+    fi
+
+    tfpb_clean
+    mkdir -p "$ERLANG_PROTO_DIR"
+    cd "$ERLANG_PROTO_DIR"
+    rebar3 compile
+    rebar3 grpc gen
+    rebar3 compile
+    touch "$BUILD_CHECK"
+
+    echo "Protocol buffer code for Erlang generated"
+}
+
+case "$ACTION" in
+    clean) tfpb_clean;;
+    generate) tfpb_generate;;
+    *) usage;;
+esac
+
diff --git a/proto/l3_attackmitigator.proto b/proto/l3_attackmitigator.proto
index 572d96f9e586dae4a124b1b9de1368b71fb9f0b7..d8ed4baf788a793b6b1451606760256db8ebe089 100644
--- a/proto/l3_attackmitigator.proto
+++ b/proto/l3_attackmitigator.proto
@@ -13,15 +13,14 @@
 // limitations under the License.
 
 syntax = "proto3";
+package l3_attackmitigator;
 
 import "context.proto";
+import "l3_centralizedattackdetector.proto";
 
 service L3Attackmitigator{
-  // Perform Mitigation
-  rpc PerformMitigation (L3AttackmitigatorOutput) returns (context.Empty) {}
-  // Get Mitigation
+  rpc PerformMitigation (L3AttackmitigatorOutput) returns (l3_centralizedattackdetector.StatusMessage) {}
   rpc GetMitigation (context.Empty) returns (context.Empty) {}
-  // Get Configured ACL Rules
   rpc GetConfiguredACLRules (context.Empty) returns (ACLRules) {}
 }
 
diff --git a/proto/l3_centralizedattackdetector.proto b/proto/l3_centralizedattackdetector.proto
index ed99435aa7db6584b381079cb1e3d589fb9998b5..de967aea0812c611d7d969b2c3b20421446e927f 100644
--- a/proto/l3_centralizedattackdetector.proto
+++ b/proto/l3_centralizedattackdetector.proto
@@ -13,18 +13,23 @@
 // limitations under the License.
 
 syntax = "proto3";
+package l3_centralizedattackdetector;
 
 import "context.proto";
 
 service L3Centralizedattackdetector {
   // Analyze single input to the ML model in the CAD component
-  rpc AnalyzeConnectionStatistics (L3CentralizedattackdetectorMetrics) returns (Empty) {}
+  rpc AnalyzeConnectionStatistics (L3CentralizedattackdetectorMetrics) returns (StatusMessage) {}
 
   // Analyze a batch of inputs to the ML model in the CAD component
-  rpc AnalyzeBatchConnectionStatistics (L3CentralizedattackdetectorBatchInput) returns (Empty) {}
+  rpc AnalyzeBatchConnectionStatistics (L3CentralizedattackdetectorBatchInput) returns (StatusMessage) {}
 
   // Get the list of features used by the ML model in the CAD component
-  rpc GetFeaturesIds (Empty) returns (AutoFeatures) {}
+  rpc GetFeaturesIds (context.Empty) returns (AutoFeatures) {}
+
+  // Sets the list of attack IPs used to compute the prediction accuracy of the
+  // ML model in the CAD component when the ML model is being tested.
+  rpc SetAttackIPs (AttackIPs) returns (context.Empty) {}
 }
 
 message Feature {
@@ -63,6 +68,10 @@ message L3CentralizedattackdetectorBatchInput {
 	repeated L3CentralizedattackdetectorMetrics metrics = 1;
 }
 
-message Empty {
+message StatusMessage {
 	string message = 1;
 }
+
+message AttackIPs {
+	repeated string attack_ips = 1;
+}
diff --git a/proto/src/erlang/.gitignore b/proto/src/erlang/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b583d287f2c200fe88ad47c7e8d9cfcf15b0135c
--- /dev/null
+++ b/proto/src/erlang/.gitignore
@@ -0,0 +1,5 @@
+*
+!rebar.config
+!rebar.lock
+!src/
+!src/tfpb.app.src
diff --git a/proto/src/erlang/rebar.config b/proto/src/erlang/rebar.config
new file mode 100644
index 0000000000000000000000000000000000000000..55e139eabf9bf5e0bdcb66af889d78bdc8aa9c11
--- /dev/null
+++ b/proto/src/erlang/rebar.config
@@ -0,0 +1,21 @@
+% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+% You may obtain a copy of the License at
+%
+%      http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+{erl_opts, [debug_info]}.
+{deps, [grpcbox]}.
+
+{grpc, [{protos, "../.."},
+        {gpb_opts, [{i, "../.."}, {strbin, true}, {descriptor, true}, {module_name_suffix, "_pb"}]}]}.
+
+{plugins, [grpcbox_plugin]}.
diff --git a/proto/src/erlang/rebar.lock b/proto/src/erlang/rebar.lock
new file mode 100644
index 0000000000000000000000000000000000000000..d353eaf344ffe261be7200c1cb3a1ad76bf80703
--- /dev/null
+++ b/proto/src/erlang/rebar.lock
@@ -0,0 +1,23 @@
+{"1.2.0",
+[{<<"acceptor_pool">>,{pkg,<<"acceptor_pool">>,<<"1.0.0">>},1},
+ {<<"chatterbox">>,{pkg,<<"ts_chatterbox">>,<<"0.12.0">>},1},
+ {<<"ctx">>,{pkg,<<"ctx">>,<<"0.6.0">>},1},
+ {<<"gproc">>,{pkg,<<"gproc">>,<<"0.8.0">>},1},
+ {<<"grpcbox">>,{pkg,<<"grpcbox">>,<<"0.15.0">>},0},
+ {<<"hpack">>,{pkg,<<"hpack_erl">>,<<"0.2.3">>},2}]}.
+[
+{pkg_hash,[
+ {<<"acceptor_pool">>, <<"43C20D2ACAE35F0C2BCD64F9D2BDE267E459F0F3FD23DAB26485BF518C281B21">>},
+ {<<"chatterbox">>, <<"4E54F199E15C0320B85372A24E35554A2CCFC4342E0B7CD8DAED9A04F9B8EF4A">>},
+ {<<"ctx">>, <<"8FF88B70E6400C4DF90142E7F130625B82086077A45364A78D208ED3ED53C7FE">>},
+ {<<"gproc">>, <<"CEA02C578589C61E5341FCE149EA36CCEF236CC2ECAC8691FBA408E7EA77EC2F">>},
+ {<<"grpcbox">>, <<"97C7126296A091602D372EBF5860A04F7BC795B45B33A984CAD2B8E362774FD8">>},
+ {<<"hpack">>, <<"17670F83FF984AE6CD74B1C456EDDE906D27FF013740EE4D9EFAA4F1BF999633">>}]},
+{pkg_hash_ext,[
+ {<<"acceptor_pool">>, <<"0CBCD83FDC8B9AD2EEE2067EF8B91A14858A5883CB7CD800E6FCD5803E158788">>},
+ {<<"chatterbox">>, <<"6478C161BC60244F41CD5847CC3ACCD26D997883E9F7FACD36FF24533B2FA579">>},
+ {<<"ctx">>, <<"A14ED2D1B67723DBEBBE423B28D7615EB0BDCBA6FF28F2D1F1B0A7E1D4AA5FC2">>},
+ {<<"gproc">>, <<"580ADAFA56463B75263EF5A5DF4C86AF321F68694E7786CB057FD805D1E2A7DE">>},
+ {<<"grpcbox">>, <<"161ABE9E17E7D1982EFA6488ADEAA13C3E847A07984A6E6B224E553368918647">>},
+ {<<"hpack">>, <<"06F580167C4B8B8A6429040DF36CC93BBA6D571FAEAEC1B28816523379CBB23A">>}]}
+].
diff --git a/proto/src/erlang/src/tfpb.app.src b/proto/src/erlang/src/tfpb.app.src
new file mode 100644
index 0000000000000000000000000000000000000000..097bdc5979ece9d9d02322c16bd97816460afc00
--- /dev/null
+++ b/proto/src/erlang/src/tfpb.app.src
@@ -0,0 +1,24 @@
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+{application, tfpb,
+ [{description, "Teraflow Erlang Protocol Buffers"},
+  {vsn, "0.1.0"},
+  {registered, []},
+  {applications, [kernel, stdlib, grpcbox]},
+  {env, []},
+  {modules, []},
+  {licenses, ["Apache 2.0"]},
+  {links, []}
+ ]}.
diff --git a/src/common/Constants.py b/src/common/Constants.py
index ed1c1475ad3c69cfb9bd650f0d99f33c6cf0f2bc..423f2558b71b189b9e771e5af94968d28f8777c0 100644
--- a/src/common/Constants.py
+++ b/src/common/Constants.py
@@ -56,6 +56,7 @@ class ServiceNameEnum(Enum):
     OPTICALATTACKDETECTOR  = 'opticalattackdetector'
     OPTICALATTACKMITIGATOR = 'opticalattackmitigator'
     CACHING                = 'caching'
+    TE                     = 'te'
 
     # Used for test and debugging only
     DLT_GATEWAY    = 'dltgateway'
@@ -80,6 +81,7 @@ DEFAULT_SERVICE_GRPC_PORTS = {
     ServiceNameEnum.OPTICALATTACKMANAGER   .value : 10005,
     ServiceNameEnum.INTERDOMAIN            .value : 10010,
     ServiceNameEnum.PATHCOMP               .value : 10020,
+    ServiceNameEnum.TE                     .value : 10030,
 
     # Used for test and debugging only
     ServiceNameEnum.DLT_GATEWAY   .value : 50051,
diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py
index 668133abff416dd21f693ff41d7b3c6431f5e148..3937eaa114429ce9d004933a5d5baf1ae6137513 100644
--- a/src/context/service/database/models/enums/ServiceType.py
+++ b/src/context/service/database/models/enums/ServiceType.py
@@ -21,6 +21,7 @@ class ORM_ServiceTypeEnum(enum.Enum):
     L3NM                      = ServiceTypeEnum.SERVICETYPE_L3NM
     L2NM                      = ServiceTypeEnum.SERVICETYPE_L2NM
     TAPI_CONNECTIVITY_SERVICE = ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
+    TE                        = ServiceTypeEnum.SERVICETYPE_TE
 
 grpc_to_enum__service_type = functools.partial(
     grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum)
diff --git a/src/l3_attackmitigator/README.md b/src/l3_attackmitigator/README.md
index 04c937a1d35e91071e0357278c81b33335e2e37a..d82400cdcd2a396c6275fea39dc1c127ee3510dc 100644
--- a/src/l3_attackmitigator/README.md
+++ b/src/l3_attackmitigator/README.md
@@ -1,3 +1,30 @@
-# l3_attackmitigator
-- Receives packages and process it with TSTAT
-- Functions: ReportSummarizeKpi(KpiList)
+# L3 Attack Mitigator
+
+Receives detected attacks from the Centralized Attack Detector component and performs the necessary mitigations.
+
+## Functions:
+- PerformMitigation(L3AttackmitigatorOutput) -> StatusMessage
+- GetMitigation(Empty) -> Empty
+- GetConfiguredACLRules(Empty) -> ACLRules
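+
+A minimal usage sketch of the Python client (host and port are resolved from the `ServiceNameEnum.L3_AM` defaults when omitted; all field values below are illustrative):
+
+```python
+from common.proto.l3_attackmitigator_pb2 import L3AttackmitigatorOutput
+from l3_attackmitigator.client.l3_attackmitigatorClient import l3_attackmitigatorClient
+
+client = l3_attackmitigatorClient()  # host/port resolved from environment defaults
+
+output = L3AttackmitigatorOutput()
+output.confidence = 0.95  # prediction confidence reported by the detector
+output.tag = 1            # predicted class (illustrative)
+output.ip_o = "192.0.2.10"
+output.port_o = "12345"
+output.ip_d = "192.0.2.20"
+output.port_d = "3333"
+output.service_id.context_id.context_uuid.uuid = "admin"  # placeholder context
+output.service_id.service_uuid.uuid = "service-uuid"      # placeholder service under attack
+
+status = client.PerformMitigation(output)
+print(status.message)
+```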
diff --git a/src/l3_attackmitigator/client/l3_attackmitigatorClient.py b/src/l3_attackmitigator/client/l3_attackmitigatorClient.py
index c5d98b1c4974172e50e65db16ba4753e742eab28..bae3fd62785e02eed1cd8fd7678c1775b0193d84 100644
--- a/src/l3_attackmitigator/client/l3_attackmitigatorClient.py
+++ b/src/l3_attackmitigator/client/l3_attackmitigatorClient.py
@@ -15,17 +15,12 @@
 import grpc, logging
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import Empty
+from common.proto.l3_attackmitigator_pb2 import L3AttackmitigatorOutput, ACLRules
+from common.proto.l3_attackmitigator_pb2_grpc import L3AttackmitigatorStub
+from common.proto.l3_centralizedattackdetector_pb2 import StatusMessage
 from common.tools.client.RetryDecorator import retry, delay_exponential
-from common.proto.l3_attackmitigator_pb2_grpc import (
-    L3AttackmitigatorStub,
-)
-from common.proto.l3_attackmitigator_pb2 import (
-    L3AttackmitigatorOutput, ACLRules
-)
-
-from common.proto.context_pb2 import (
-    Empty
-)
+from common.tools.grpc.Tools import grpc_message_to_json_string
 
 LOGGER = logging.getLogger(__name__)
 MAX_RETRIES = 15
@@ -37,7 +32,7 @@ class l3_attackmitigatorClient:
         if not host: host = get_service_host(ServiceNameEnum.L3_AM)
         if not port: port = get_service_port_grpc(ServiceNameEnum.L3_AM)
         self.endpoint = "{}:{}".format(host, port)
-        LOGGER.debug("Creating channel to {}...".format(self.endpoint))
+        LOGGER.debug("Creating channel to {:s}...".format(self.endpoint))
         self.channel = None
         self.stub = None
         self.connect()
@@ -54,23 +49,22 @@ class l3_attackmitigatorClient:
         self.stub = None
 
     @RETRY_DECORATOR
-    def PerformMitigation(self, request: L3AttackmitigatorOutput) -> Empty:
-        LOGGER.debug('PerformMitigation request: {}'.format(request))
+    def PerformMitigation(self, request: L3AttackmitigatorOutput) -> StatusMessage:
+        LOGGER.debug('PerformMitigation request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.PerformMitigation(request)
-        LOGGER.debug('PerformMitigation result: {}'.format(response))
+        LOGGER.debug('PerformMitigation result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
     
     @RETRY_DECORATOR
     def GetMitigation(self, request: Empty) -> Empty:
-        LOGGER.debug('GetMitigation request: {}'.format(request))
+        LOGGER.debug('GetMitigation request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetMitigation(request)
-        LOGGER.debug('GetMitigation result: {}'.format(response))
+        LOGGER.debug('GetMitigation result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
     
     @RETRY_DECORATOR
     def GetConfiguredACLRules(self, request: Empty) -> ACLRules:
-        LOGGER.debug('GetConfiguredACLRules request: {}'.format(request))
+        LOGGER.debug('GetConfiguredACLRules request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.GetConfiguredACLRules(request)
-        LOGGER.debug('GetConfiguredACLRules result: {}'.format(response))
+        LOGGER.debug('GetConfiguredACLRules result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
-
diff --git a/src/l3_attackmitigator/requirements.in b/src/l3_attackmitigator/requirements.in
index a8aba849708799232f6b0732c3661396266da329..38d04994fb0fa1951fb465bc127eb72659dc2eaf 100644
--- a/src/l3_attackmitigator/requirements.in
+++ b/src/l3_attackmitigator/requirements.in
@@ -11,5 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-# no extra dependency
diff --git a/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py b/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
index f3613b377a86f61ba0a76665eb3001f5d9721a2a..5a7abe0a7416e61ae73b24e5f528ebc1717d8f2e 100644
--- a/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
+++ b/src/l3_attackmitigator/service/l3_attackmitigatorServiceServicerImpl.py
@@ -13,33 +13,39 @@
 # limitations under the License.
 
 from __future__ import print_function
+
+import grpc
 import logging
 import time
 
-from common.proto.l3_centralizedattackdetector_pb2 import Empty
-from common.proto.l3_attackmitigator_pb2_grpc import L3AttackmitigatorServicer
-from common.proto.l3_attackmitigator_pb2 import ACLRules
-from common.proto.context_pb2 import (
-    ServiceId,
-    ConfigActionEnum,
-)
-
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.proto.acl_pb2 import AclForwardActionEnum, AclLogActionEnum, AclRuleTypeEnum
-from common.proto.context_pb2 import ConfigActionEnum, Service, ServiceId, ConfigRule
+from common.proto.context_pb2 import ConfigActionEnum, Empty, Service, ServiceId
+from common.proto.l3_attackmitigator_pb2 import ACLRules, L3AttackmitigatorOutput
+from common.proto.l3_attackmitigator_pb2_grpc import L3AttackmitigatorServicer
+from common.proto.l3_centralizedattackdetector_pb2 import StatusMessage
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from context.client.ContextClient import ContextClient
 from service.client.ServiceClient import ServiceClient
 
-from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 
 LOGGER = logging.getLogger(__name__)
-
-METRICS_POOL = MetricsPool('l3_attackmitigator', 'RPC')
+METRICS_POOL = MetricsPool("l3_attackmitigator", "RPC")
 
 
 class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
     def __init__(self):
-        LOGGER.info("Creating Attack Mitigator Service")
+        """
+        Initializes the Attack Mitigator service.
+
+        Args:
+            None.
+
+        Returns:
+            None.
+        """
+
+        LOGGER.info("Creating Attack Mitigator service")
 
         self.last_value = -1
         self.last_tag = 0
@@ -60,6 +66,23 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
         src_port: str,
         dst_port: str,
     ) -> None:
+        """
+        Configures an ACL rule to block undesired TCP traffic.
+
+        Args:
+            context_uuid (str): The UUID of the context.
+            service_uuid (str): The UUID of the service.
+            device_uuid (str): The UUID of the device.
+            endpoint_uuid (str): The UUID of the endpoint.
+            src_ip (str): The source IP address.
+            dst_ip (str): The destination IP address.
+            src_port (str): The source port.
+            dst_port (str): The destination port.
+
+        Returns:
+            None.
+        """
+
         # Create ServiceId
         service_id = ServiceId()
         service_id.context_id.context_uuid.uuid = context_uuid
@@ -107,29 +130,41 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
         acl_entry.action.forward_action = AclForwardActionEnum.ACLFORWARDINGACTION_DROP
         acl_entry.action.log_action = AclLogActionEnum.ACLLOGACTION_NOLOG
 
-        LOGGER.info("ACL Rule Set: %s", grpc_message_to_json_string(acl_rule_set))
-        LOGGER.info("ACL Config Rule: %s", grpc_message_to_json_string(acl_config_rule))
+        LOGGER.info(f"ACL Rule Set: {grpc_message_to_json_string(acl_rule_set)}")
+        LOGGER.info(f"ACL Config Rule: {grpc_message_to_json_string(acl_config_rule)}")
 
         # Add the ACLRuleSet to the list of configured ACLRuleSets
         self.configured_acl_config_rules.append(acl_config_rule)
+
+        LOGGER.info(f"Service request: {grpc_message_to_json_string(service_request)}")
 
         # Update the Service with the new ACL RuleSet
-        service_reply: ServiceId = self.service_client.UpdateService(service_request)
+        service_reply = self.service_client.UpdateService(service_request)
 
-        LOGGER.info("Service reply: %s", grpc_message_to_json_string(service_reply))
+        LOGGER.info(f"Service reply: {grpc_message_to_json_string(service_reply)}")
 
         if service_reply != service_request.service_id:  # pylint: disable=no-member
             raise Exception("Service update failed. Wrong ServiceId was returned")
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def PerformMitigation(self, request, context):
+    def PerformMitigation(self, request : L3AttackmitigatorOutput, context : grpc.ServicerContext) -> StatusMessage:
+        """
+        Performs mitigation on an attack by configuring an ACL rule to block undesired TCP traffic.
+
+        Args:
+            request (L3AttackmitigatorOutput): The request message containing the attack mitigation information.
+            context (grpc.ServicerContext): The context of the request.
+
+        Returns:
+            StatusMessage: A response with a message indicating that the attack mitigation information
+                was received and processed.
+        """
+
         last_value = request.confidence
         last_tag = request.tag
 
         LOGGER.info(
-            "Attack Mitigator received attack mitigation information. Prediction confidence: %s, Predicted class: %s",
-            last_value,
-            last_tag,
+            f"Attack Mitigator received attack mitigation information. Prediction confidence: {last_value}, Predicted class: {last_tag}"
         )
 
         ip_o = request.ip_o
@@ -141,21 +176,23 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
         counter = 0
         service_id = request.service_id
 
-        LOGGER.info("Service Id.:\n{}".format(grpc_message_to_json_string(service_id)))
-
+        LOGGER.info(f"Service Id.: {grpc_message_to_json_string(service_id)}")
         LOGGER.info("Retrieving service from Context")
+
         while sentinel:
             try:
                 service = self.context_client.GetService(service_id)
                 sentinel = False
             except Exception as e:
                 counter = counter + 1
-                LOGGER.debug("Waiting 2 seconds", counter, e)
+                LOGGER.debug(f"Waiting 2 seconds for service to be available (attempt: {counter}): {e}")
                 time.sleep(2)
 
-        LOGGER.info(f"Service with Service Id.: {grpc_message_to_json_string(service_id)}\n{grpc_message_to_json_string(service)}")
-
+        LOGGER.info(
+            f"Service with Service Id.: {grpc_message_to_json_string(service_id)}\n{grpc_message_to_json_string(service)}"
+        )
         LOGGER.info("Adding new rule to the service to block the attack")
+
         self.configure_acl_rule(
             context_uuid=service_id.context_id.context_uuid.uuid,
             service_uuid=service_id.service_uuid.uuid,
@@ -167,8 +204,8 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
             dst_port=port_d,
         )
         LOGGER.info("Service with new rule:\n{}".format(grpc_message_to_json_string(service)))
-
         LOGGER.info("Updating service with the new rule")
+
         self.service_client.UpdateService(service)
         service = self.context_client.GetService(service_id)
 
@@ -178,10 +215,21 @@ class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
             )
         )
 
-        return Empty(message=f"OK, received values: {last_tag} with confidence {last_value}.")
+        return StatusMessage(message=f"OK, received values: {last_tag} with confidence {last_value}.")
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetConfiguredACLRules(self, request, context):
+    def GetConfiguredACLRules(self, request : Empty, context : grpc.ServicerContext) -> ACLRules:
+        """
+        Returns the configured ACL rules.
+
+        Args:
+            request (Empty): The request message.
+            context (grpc.ServicerContext): The context of the RPC call.
+
+        Returns:
+            acl_rules (ACLRules): The configured ACL rules.
+        """
+
         acl_rules = ACLRules()
 
         for acl_config_rule in self.configured_acl_config_rules:
diff --git a/src/l3_attackmitigator/service/test_create_service.py b/src/l3_attackmitigator/service/test_create_service.py
deleted file mode 100644
index 01cf769a271de1bbbd0329a3ce21ea476ac10cab..0000000000000000000000000000000000000000
--- a/src/l3_attackmitigator/service/test_create_service.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-import logging
-from common.proto.l3_centralizedattackdetector_pb2 import (
-    Empty
-)
-from common.proto.l3_attackmitigator_pb2_grpc import (
-    L3AttackmitigatorServicer,
-)
-from common.proto.context_pb2 import (
-    Service, ServiceId, ServiceConfig, ServiceTypeEnum, ServiceStatusEnum, ServiceStatus, Context, ContextId, Uuid,
-    Timestamp, ConfigRule, ConfigRule_Custom, ConfigActionEnum, Device, DeviceId, DeviceConfig,
-    DeviceOperationalStatusEnum, DeviceDriverEnum, EndPoint, Link, LinkId, EndPoint, EndPointId, Topology, TopologyId
-)
-from common.proto.context_pb2_grpc import (
-    ContextServiceStub
-)
-from common.proto.service_pb2_grpc import (
-    ServiceServiceStub
-)
-from datetime import datetime
-import grpc
-
-LOGGER = logging.getLogger(__name__)
-CONTEXT_CHANNEL = "192.168.165.78:1010"
-SERVICE_CHANNEL = "192.168.165.78:3030"
-
-class l3_attackmitigatorServiceServicerImpl(L3AttackmitigatorServicer):
-
-    def GetNewService(self, service_id):
-        service = Service()
-        service_id_obj = self.GenerateServiceId(service_id)
-        service.service_id.CopyFrom(service_id_obj)
-        service.service_type = ServiceTypeEnum.SERVICETYPE_L3NM
-        service_status = ServiceStatus()
-        service_status.service_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
-        service.service_status.CopyFrom(service_status)
-        timestamp = Timestamp()
-        timestamp.timestamp = datetime.timestamp(datetime.now())
-        service.timestamp.CopyFrom(timestamp)
-        return service
-    
-    def GetNewContext(self, service_id):
-        context = Context()
-        context_id = ContextId()
-        uuid = Uuid()
-        uuid.uuid = service_id
-        context_id.context_uuid.CopyFrom(uuid)
-        context.context_id.CopyFrom(context_id)
-        return context
-
-    def GetNewDevice(self, service_id):
-        device = Device()
-        device_id = DeviceId()
-        uuid = Uuid()
-        uuid.uuid = service_id
-        device_id.device_uuid.CopyFrom(uuid)
-        device.device_type="test"
-        device.device_id.CopyFrom(device_id)
-        device.device_operational_status = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
-        return device
-
-    def GetNewLink(self, service_id):
-        link = Link()
-        link_id = LinkId()
-        uuid = Uuid()
-        uuid.uuid = service_id
-        link_id.link_uuid.CopyFrom(uuid)
-        link.link_id.CopyFrom(link_id)
-        return link
-
-    def GetNewTopology(self,context_id, device_id, link_id):
-        topology = Topology()
-        topology_id = TopologyId()
-        topology_id.context_id.CopyFrom(context_id)
-        uuid = Uuid()
-        uuid.uuid = "test_crypto"
-        topology_id.topology_uuid.CopyFrom(uuid)
-        topology.topology_id.CopyFrom(topology_id)
-        topology.device_ids.extend([device_id])
-        topology.link_ids.extend([link_id])
-        return topology
-
-    def GetNewEndpoint(self, topology_id, device_id, uuid_name):
-        endpoint = EndPoint()
-        endpoint_id = EndPointId()
-        endpoint_id.topology_id.CopyFrom(topology_id)
-        endpoint_id.device_id.CopyFrom(device_id)
-        uuid = Uuid()
-        uuid.uuid = uuid_name
-        endpoint_id.endpoint_uuid.CopyFrom(uuid)
-        endpoint.endpoint_id.CopyFrom(endpoint_id)
-        endpoint.endpoint_type = "test"
-        return endpoint
-        
-
-    def __init__(self):
-        LOGGER.debug("Creating Servicer...")
-        self.last_value = -1
-        self.last_tag = 0
-       """ 
-        context = self.GetNewContext("test_crypto")
-        print(context, flush=True)
-        print(self.SetContext(context))
-
-        service = self.GetNewService("test_crypto")
-        print("This is the new service", self.CreateService(service), flush = True)
-
-        ip_o = "127.0.0.1"
-        ip_d = "127.0.0.2"
-        port_o = "123"
-        port_d = "124"
-
-        service_id = self.GenerateServiceId("test_crypto")
-
-        config_rule = self.GetConfigRule(ip_o, ip_d, port_o, port_d)
-
-        service = self.GetService(service_id)
-        print("Service obtained from id", service, flush=True)
-        
-        config_rule = self.GetConfigRule(ip_o, ip_d, port_o, port_d)
-
-        #service_config = service.service_config
-        #service_config.append(config_rule)
-        
-        service_config = ServiceConfig()
-        service_config.config_rules.extend([config_rule])
-        service.service_config.CopyFrom(service_config)
-        
-        device = self.GetNewDevice("test_crypto")
-        print("New device", device, flush=True)
-        device_id = self.SetDevice(device)
-
-        link = self.GetNewLink("test_crypto")
-        print("New link", link, flush=True)
-        link_id = self.SetLink(link)
-        
-        topology = self.GetNewTopology(context.context_id, device.device_id, link.link_id)
-        print("New topology", topology, flush=True)
-        topology_id = self.SetTopology(topology)
-
-        endpoint = self.GetNewEndpoint(topology.topology_id, device.device_id, "test_crypto")
-        print("New endpoint", endpoint, flush=True)
-        link.link_endpoint_ids.extend([endpoint.endpoint_id])
-
-        self.SetLink(link)
-
-        print("Service with new rule", service, flush=True)
-        self.UpdateService(service)
-
-        service2 = self.GetService(service_id)
-        print("Service obtained from id after updating", service2, flush=True)
-        """
-
-    def GenerateRuleValue(self, ip_o, ip_d, port_o, port_d):
-        value = {
-            'ipv4:source-address': ip_o,
-            'ipv4:destination-address': ip_d,
-            'transport:source-port': port_o,
-            'transport:destination-port': port_d,
-            'forwarding-action': 'DROP',
-        }
-        return value
-
-    def GetConfigRule(self, ip_o, ip_d, port_o, port_d):
-        config_rule = ConfigRule()
-        config_rule_custom = ConfigRule_Custom()
-        config_rule.action = ConfigActionEnum.CONFIGACTION_SET
-        config_rule_custom.resource_key = 'test'
-        config_rule_custom.resource_value = str(self.GenerateRuleValue(ip_o, ip_d, port_o, port_d))
-        config_rule.custom.CopyFrom(config_rule_custom)
-        return config_rule
-
-    def GenerateServiceId(self, service_id):
-        service_id_obj = ServiceId()
-        context_id = ContextId()
-        uuid = Uuid()
-        uuid.uuid = service_id
-        context_id.context_uuid.CopyFrom(uuid)
-        service_id_obj.context_id.CopyFrom(context_id)
-        service_id_obj.service_uuid.CopyFrom(uuid)
-        return service_id_obj
-   
-    def SendOutput(self, request, context):
-        # SEND CONFIDENCE TO MITIGATION SERVER
-        print("Server received mitigation values...", request.confidence, flush=True)
-        
-        last_value = request.confidence
-        last_tag = request.tag
-
-        ip_o = request.ip_o
-        ip_d = request.ip_d
-        port_o = request.port_o
-        port_d = request.port_d
-
-        service_id = self.GenerateServiceId(request.service_id)
-
-        config_rule = self.GetConfigRule(ip_o, ip_d, port_o, port_d)
-        
-        service = GetService(service_id)
-        print(service)
-        #service.config_rules.append(config_rule)
-        #UpdateService(service)
-
-        # RETURN OK TO THE CALLER
-        return Empty(
-            message=f"OK, received values: {last_tag} with confidence {last_value}."
-        )
-     
-    def SetDevice(self, device):
-        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
-            stub = ContextServiceStub(channel)
-            return stub.SetDevice(device)
-
-    def SetLink(self, link):
-        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
-            stub = ContextServiceStub(channel)
-            return stub.SetLink(link)
-
-    def SetTopology(self, link):
-        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
-            stub = ContextServiceStub(channel)
-            return stub.SetTopology(link)
-
-
-    def GetService(self, service_id):
-        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
-            stub = ContextServiceStub(channel)
-            return stub.GetService(service_id)
-
-    def SetContext(self, context):
-        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
-            stub = ContextServiceStub(channel)
-            return stub.SetContext(context)
-
-    def UpdateService(self, service):
-        with grpc.insecure_channel(SERVICE_CHANNEL) as channel:
-            stub = ServiceServiceStub(channel)
-            stub.UpdateService(service)
-
-    def CreateService(self, service):
-        with grpc.insecure_channel(SERVICE_CHANNEL) as channel:
-            stub = ServiceServiceStub(channel)
-            stub.CreateService(service)
-
-    def GetMitigation(self, request, context):
-        # GET OR PERFORM MITIGATION STRATEGY
-        logging.debug("")
-        print("Returing mitigation strategy...")
-        k = self.last_value * 2
-        return Empty(
-            message=f"Mitigation with double confidence = {k}"
-        )
-    
diff --git a/src/l3_centralizedattackdetector/Config.py b/src/l3_centralizedattackdetector/Config.py
index f6c7e33553820b1214e5265cf219db629bcfe006..809380b2cda1c8c556f973e570de36e3189edb99 100644
--- a/src/l3_centralizedattackdetector/Config.py
+++ b/src/l3_centralizedattackdetector/Config.py
@@ -18,7 +18,7 @@ import logging
 LOG_LEVEL = logging.WARNING
 
 # gRPC settings
-GRPC_SERVICE_PORT = 10001  # TODO UPM FIXME
+GRPC_SERVICE_PORT = 10001
 GRPC_MAX_WORKERS = 10
 GRPC_GRACE_PERIOD = 60
 
diff --git a/src/l3_centralizedattackdetector/README.md b/src/l3_centralizedattackdetector/README.md
index bcec4052cc9aa2ea734e08a4ed6b9158609b3532..2273eef80ec4c366d549d20d9447434003257217 100644
--- a/src/l3_centralizedattackdetector/README.md
+++ b/src/l3_centralizedattackdetector/README.md
@@ -1,3 +1,32 @@
-# l3_centralizedattackdetector
-- Receives packages and process it with TSTAT
-- Functions: ReportSummarizeKpi(KpiList)
+# L3 Centralized Attack Detector
+
+Receives snapshot statistics from the Distributed Attack Detector component and performs inference to detect attacks.
+It then sends the detected attacks to the Attack Mitigator component for mitigation.
+
+## Functions:
+- AnalyzeConnectionStatistics(L3CentralizedattackdetectorMetrics) -> StatusMessage
+- AnalyzeBatchConnectionStatistics(L3CentralizedattackdetectorBatchInput) -> StatusMessage
+- GetFeaturesIds(Empty) -> AutoFeatures
+- SetAttackIPs(AttackIPs) -> Empty
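+
+A minimal usage sketch of the Python client (address and port are illustrative; the number of features must match what the loaded ONNX model expects):
+
+```python
+from common.proto.l3_centralizedattackdetector_pb2 import (
+    AttackIPs, L3CentralizedattackdetectorMetrics)
+from l3_centralizedattackdetector.client.l3_centralizedattackdetectorClient import (
+    l3_centralizedattackdetectorClient)
+
+client = l3_centralizedattackdetectorClient("127.0.0.1", 10001)
+
+# Optional: provide known attacker IPs so prediction accuracy can be computed
+# when TEST_ML_MODEL=1 (list content is illustrative).
+client.SetAttackIPs(AttackIPs(attack_ips=["192.0.2.10"]))
+
+metrics = L3CentralizedattackdetectorMetrics()
+for value in (0.0, 1.0, 2.0):  # illustrative connection features
+    metrics.features.add().feature = value
+
+status = client.AnalyzeConnectionStatistics(metrics)
+print(status.message)
+```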
diff --git a/src/l3_centralizedattackdetector/client/l3_centralizedattackdetectorClient.py b/src/l3_centralizedattackdetector/client/l3_centralizedattackdetectorClient.py
index 2ef33438e77dbe4c3609bd21133fb3a9c95c8bcc..8de016a5d56ea1e1fefe23ba6e29f6865ee5e5a6 100644
--- a/src/l3_centralizedattackdetector/client/l3_centralizedattackdetectorClient.py
+++ b/src/l3_centralizedattackdetector/client/l3_centralizedattackdetectorClient.py
@@ -13,18 +13,17 @@
 # limitations under the License.
 
 import grpc, logging
-from common.tools.client.RetryDecorator import retry, delay_exponential
-from common.proto.l3_centralizedattackdetector_pb2_grpc import (
-    L3CentralizedattackdetectorStub,
-)
+from common.proto.context_pb2 import Empty
+from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorStub
 from common.proto.l3_centralizedattackdetector_pb2 import (
+    AttackIPs,
     AutoFeatures,
-    Empty,
     L3CentralizedattackdetectorBatchInput,
     L3CentralizedattackdetectorMetrics,
-    ModelInput,
-    ModelOutput
+    StatusMessage
 )
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
 
 LOGGER = logging.getLogger(__name__)
 MAX_RETRIES = 15
@@ -34,7 +33,7 @@ RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION,
 class l3_centralizedattackdetectorClient:
     def __init__(self, address, port):
         self.endpoint = "{}:{}".format(address, port)
-        LOGGER.debug("Creating channel to {}...".format(self.endpoint))
+        LOGGER.debug("Creating channel to {:s}...".format(self.endpoint))
         self.channel = None
         self.stub = None
         self.connect()
@@ -51,24 +50,29 @@ class l3_centralizedattackdetectorClient:
         self.stub = None
 
     @RETRY_DECORATOR
-    def AnalyzeConnectionStatistics(self, request: L3CentralizedattackdetectorMetrics) -> Empty:
-        LOGGER.debug('AnalyzeConnectionStatistics request: {}'.format(request))
+    def AnalyzeConnectionStatistics(self, request : L3CentralizedattackdetectorMetrics) -> StatusMessage:
+        LOGGER.debug('AnalyzeConnectionStatistics request: {:s}'.format(grpc_message_to_json_string(request)))
         response = self.stub.AnalyzeConnectionStatistics(request)
-        LOGGER.debug('AnalyzeConnectionStatistics result: {}'.format(response))
+        LOGGER.debug('AnalyzeConnectionStatistics result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
-    
+
     @RETRY_DECORATOR
-    def AnalyzeBatchConnectionStatistics(self, request: L3CentralizedattackdetectorBatchInput) -> Empty:
-        LOGGER.debug('AnalyzeBatchConnectionStatistics request: {}'.format(request))
-        response = self.stub.GetOutput(request)
-        LOGGER.debug('AnalyzeBatchConnectionStatistics result: {}'.format(response))
+    def AnalyzeBatchConnectionStatistics(self, request: L3CentralizedattackdetectorBatchInput) -> StatusMessage:
+        LOGGER.debug('AnalyzeBatchConnectionStatistics request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.AnalyzeBatchConnectionStatistics(request)
+        LOGGER.debug('AnalyzeBatchConnectionStatistics result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
-    
+
     @RETRY_DECORATOR
-    def GetFeaturesIds(self, request: Empty) -> AutoFeatures:
-        LOGGER.debug('GetFeaturesIds request: {}'.format(request))
-        response = self.stub.GetOutput(request)
-        LOGGER.debug('GetFeaturesIds result: {}'.format(response))
+    def GetFeaturesIds(self, request : Empty) -> AutoFeatures:
+        LOGGER.debug('GetFeaturesIds request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.GetFeaturesIds(request)
+        LOGGER.debug('GetFeaturesIds result: {:s}'.format(grpc_message_to_json_string(response)))
         return response
 
-
+    @RETRY_DECORATOR
+    def SetAttackIPs(self, request : AttackIPs) -> Empty:
+        LOGGER.debug('SetAttackIPs request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.SetAttackIPs(request)
+        LOGGER.debug('SetAttackIPs result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
index 3bfd6fd2ff09ef471d94b6c66470ed5668704827..91793230d0626d9a8dc112c6442a7364b6beb1a1 100644
--- a/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
+++ b/src/l3_centralizedattackdetector/service/l3_centralizedattackdetectorServiceServicerImpl.py
@@ -13,34 +13,34 @@
 # limitations under the License.
 
 from __future__ import print_function
-from datetime import datetime, timedelta
 
 import csv
-import os
+import grpc
+import logging
 import numpy as np
 import onnxruntime as rt
-import logging
+import os
 import time
 import uuid
 
+from datetime import datetime, timedelta
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
-from common.proto.context_pb2 import Timestamp, SliceId, ConnectionId
+from common.proto.context_pb2 import Empty, Timestamp
 from common.proto.kpi_sample_types_pb2 import KpiSampleType
 from common.proto.l3_attackmitigator_pb2 import L3AttackmitigatorOutput
-from common.proto.l3_centralizedattackdetector_pb2 import Empty, AutoFeatures
+from common.proto.l3_centralizedattackdetector_pb2 import AttackIPs, AutoFeatures, L3CentralizedattackdetectorMetrics, StatusMessage
 from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorServicer
 from common.proto.monitoring_pb2 import Kpi, KpiDescriptor
 from common.tools.timestamp.Converters import timestamp_utcnow_to_float
-from monitoring.client.MonitoringClient import MonitoringClient
 from l3_attackmitigator.client.l3_attackmitigatorClient import l3_attackmitigatorClient
+from monitoring.client.MonitoringClient import MonitoringClient
 
 
 LOGGER = logging.getLogger(__name__)
 current_dir = os.path.dirname(os.path.abspath(__file__))
 
-# Constants
-DEMO_MODE = False
-ATTACK_IPS = ["37.187.95.110", "91.121.140.167", "94.23.23.52", "94.23.247.226", "149.202.83.171"]
+# Environment variables
+TEST_ML_MODEL = int(os.getenv("TEST_ML_MODEL", "0")) == 1
 BATCH_SIZE = int(os.getenv("BATCH_SIZE", 10))
 METRICS_POOL = MetricsPool("l3_centralizedattackdetector", "RPC")
 
@@ -61,16 +61,21 @@ class ConnectionInfo:
         )
 
     def __str__(self):
-        return "ip_o: " + self.ip_o + "\nport_o: " + self.port_o + "\nip_d: " + self.ip_d + "\nport_d: " + self.port_d
+        return f"ip_o: {self.ip_o}\nport_o: {self.port_o}\nip_d: {self.ip_d}\nport_d: {self.port_d}"
 
 
 class l3_centralizedattackdetectorServiceServicerImpl(L3CentralizedattackdetectorServicer):
+    def __init__(self):
+        """
+        Initializes the Centralized Attack Detector service.
 
-    """
-    Initialize variables, prediction model and clients of components used by CAD
-    """
+        Args:
+            None
+
+        Returns:
+            None
+        """
 
-    def __init__(self):
         LOGGER.info("Creating Centralized Attack Detector Service")
 
         self.inference_values = []
@@ -82,14 +87,14 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         )
         self.cryptomining_detector_model = rt.InferenceSession(self.cryptomining_detector_model_path)
 
-        # Load cryptomining detector features metadata from ONNX file
+        # Load cryptomining attack detector features metadata from ONNX file
         self.cryptomining_detector_features_metadata = list(
             self.cryptomining_detector_model.get_modelmeta().custom_metadata_map.values()
         )
         self.cryptomining_detector_features_metadata = [float(x) for x in self.cryptomining_detector_features_metadata]
         self.cryptomining_detector_features_metadata.sort()
-        LOGGER.info("Cryptomining Detector Features: " + str(self.cryptomining_detector_features_metadata))
 
+        LOGGER.info(f"Cryptomining Attack Detector Features: {self.cryptomining_detector_features_metadata}")
         LOGGER.info(f"Batch size: {BATCH_SIZE}")
 
         self.input_name = self.cryptomining_detector_model.get_inputs()[0].name
@@ -121,7 +126,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         self.monitored_kpis = {
             "l3_security_status": {
                 "kpi_id": None,
-                "description": "L3 - Confidence of the cryptomining detector in the security status in the last time interval of the service {service_id}",
+                "description": "L3 - Confidence of the cryptomining attack detector in the security status in the last time interval of the service {service_id}",
                 "kpi_sample_type": KpiSampleType.KPISAMPLETYPE_L3_SECURITY_STATUS_CRYPTO,
                 "service_ids": [],
             },
@@ -170,6 +175,9 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
 
         # AM evaluation tests
         self.am_notification_times = []
+
+        # List of attack connection IPs
+        self.attack_ips = []
 
         # List of attack connections
         self.attack_connections = []
@@ -180,13 +188,12 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         self.false_positives = 0
         self.false_negatives = 0
 
-        self.replica_uuid = uuid.uuid4()
+        self.pod_id = uuid.uuid4()
+        LOGGER.info(f"Pod Id.: {self.pod_id}")
 
         self.first_batch_request_time = 0
         self.last_batch_request_time = 0
 
-        LOGGER.info("This replica's identifier is: " + str(self.replica_uuid))
-
         self.response_times_csv_file_path = "response_times.csv"
         col_names = ["timestamp_first_req", "timestamp_last_req", "total_time", "batch_size"]
 
@@ -194,16 +201,6 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             writer = csv.writer(file)
             writer.writerow(col_names)
 
-    """
-    Create a monitored KPI for a specific service and add it to the Monitoring Client
-        -input: 
-            + service_id: service ID where the KPI will be monitored
-            + kpi_name: name of the KPI
-            + kpi_description: description of the KPI
-            + kpi_sample_type: KPI sample type of the KPI (it must be defined in the kpi_sample_types.proto file)
-        -output: KPI identifier representing the KPI
-    """
-
     def create_kpi(
         self,
         service_id,
@@ -211,24 +208,40 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         kpi_description,
         kpi_sample_type,
     ):
+        """
+        Creates a new KPI for a specific service and adds it to the Monitoring client.
+
+        Args:
+            service_id (ServiceID): The ID of the service.
+            kpi_name (str): The name of the KPI.
+            kpi_description (str): The description of the KPI.
+            kpi_sample_type (KpiSampleType): The sample type of the KPI.
+
+        Returns:
+            kpi (Kpi): The created KPI.
+        """
+
         kpidescriptor = KpiDescriptor()
         kpidescriptor.kpi_description = kpi_description
         kpidescriptor.service_id.service_uuid.uuid = service_id.service_uuid.uuid
         kpidescriptor.kpi_sample_type = kpi_sample_type
-        new_kpi = self.monitoring_client.SetKpi(kpidescriptor)
+        kpi = self.monitoring_client.SetKpi(kpidescriptor)
 
         LOGGER.info("Created KPI {}".format(kpi_name))
 
-        return new_kpi
-
-    """
-    Create the monitored KPIs for a specific service, add them to the Monitoring Client and store their identifiers in the monitored_kpis dictionary
-        -input:
-            + service_id: service ID where the KPIs will be monitored
-        -output: None
-    """
+        return kpi
 
     def create_kpis(self, service_id):
+        """
+        Creates the monitored KPIs for a specific service, adds them to the Monitoring client and stores their identifiers in the monitored_kpis dictionary
+
+        Args:
+            service_id (ServiceId): The ID of the service.
+
+        Returns:
+            None
+        """
+
         LOGGER.info("Creating KPIs for service {}".format(service_id))
 
         # all the KPIs are created for all the services from which requests are received
@@ -245,6 +258,16 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         LOGGER.info("Created KPIs for service {}".format(service_id))
 
     def monitor_kpis(self):
+        """
+        Monitors KPIs for all the services from which requests are received
+
+        Args:
+            None
+
+        Returns:
+            None
+        """
+
         monitor_inference_results = self.inference_results
         monitor_service_ids = self.service_ids
 
@@ -256,13 +279,22 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             for service_id in monitor_service_ids:
                 LOGGER.debug("service_id: {}".format(service_id))
                 
-                self.monitor_compute_l3_kpi(service_id, monitor_inference_results)
-                
+                self.monitor_compute_l3_kpi()
                 LOGGER.debug("KPIs sent to monitoring server")
         else:
             LOGGER.debug("No KPIs sent to monitoring server")
 
     def assign_timestamp(self, monitor_inference_results):
+        """
+        Assigns a timestamp to the monitored inference results.
+
+        Args:
+            monitor_inference_results (list): A list of monitored inference results.
+
+        Returns:
+            None
+        """
+
         time_interval = self.MONITORED_KPIS_TIME_INTERVAL_AGG
 
         # assign the timestamp of the first inference result to the time_interval_start
@@ -302,7 +334,17 @@
         LOGGER.debug("time_interval_start: {}".format(self.time_interval_start))
         LOGGER.debug("time_interval_end: {}".format(self.time_interval_end))
 
-    def monitor_compute_l3_kpi(self,):
+    def monitor_compute_l3_kpi(self):
+        """
+        Computes the monitored KPIs for a specific service and sends them to the Monitoring server
+
+        Args:
+            None
+
+        Returns:
+            None
+        """
+
         # L3 security status
         kpi_security_status = Kpi()
         kpi_security_status.kpi_id.kpi_id.CopyFrom(self.monitored_kpis["l3_security_status"]["kpi_id"])
@@ -357,19 +401,36 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             LOGGER.debug("Error sending KPIs to monitoring server: {}".format(e))
 
     def monitor_ml_model_confidence(self):
-        if self.l3_security_status == 0:
-            return self.l3_ml_model_confidence_normal
+        """
+        Get the monitored KPI for the confidence of the ML model
+
+        Args:
+            None
+
+        Returns:
+            confidence (float): The monitored KPI for the confidence of the ML model
+        """
 
-        return self.l3_ml_model_confidence_crypto
+        confidence = None
 
-    """
-    Classify connection as standard traffic or cryptomining attack and return results
-        -input: 
-            + request: L3CentralizedattackdetectorMetrics object with connection features information
-        -output: L3AttackmitigatorOutput object with information about the assigned class and prediction confidence
-    """
+        if self.l3_security_status == 0:
+            confidence = self.l3_ml_model_confidence_normal
+        else:
+            confidence = self.l3_ml_model_confidence_crypto
+
+        return confidence
 
     def perform_inference(self, request):
+        """
+        Performs inference on the input data using the Cryptomining Attack Detector model to classify the connection as standard traffic or cryptomining attack.
+
+        Args:
+            request (L3CentralizedattackdetectorMetrics): A L3CentralizedattackdetectorMetrics object with connection features information.
+
+        Returns:
+            dict: A dictionary containing the predicted class, the probability of that class, and other relevant information required to block the attack.
+        """
+
         x_data = np.array([[feature.feature for feature in request.features]])
 
         # Print input data shape
@@ -443,14 +504,17 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
 
         return output_message
 
-    """
-    Classify connection as standard traffic or cryptomining attack and return results
-        -input: 
-            + request: L3CentralizedattackdetectorMetrics object with connection features information
-        -output: L3AttackmitigatorOutput object with information about the assigned class and prediction confidence
-    """
+    def perform_batch_inference(self, requests):
+        """
+        Performs batch inference on the input data using the Cryptomining Attack Detector model to classify the connection as standard traffic or cryptomining attack.
+
+        Args:
+            requests (list): A list of L3CentralizedattackdetectorMetrics objects with connection features information.
+
+        Returns:
+            list: A list of dictionaries containing the predicted class, the probability of that class, and other relevant information required to block the attack for each request.
+        """
 
-    def perform_distributed_inference(self, requests):
         batch_size = len(requests)
 
         # Create an empty array to hold the input data
@@ -533,15 +597,25 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
 
         return output_messages
 
-    """
-    Receive features from Attack Mitigator, predict attack and communicate with Attack Mitigator
-        -input: 
-            + request: L3CentralizedattackdetectorMetrics object with connection features information
-        -output: Empty object with a message about the execution of the function
-    """
-
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def AnalyzeConnectionStatistics(self, request, context):
+    def AnalyzeConnectionStatistics(
+        self, request : L3CentralizedattackdetectorMetrics, context : grpc.ServicerContext
+    ) -> StatusMessage:
+        """
+        Analyzes the connection statistics sent in the request, performs batch inference on the
+        input data using the Cryptomining Attack Detector model to classify the connection as
+        standard traffic or cryptomining attack, and notifies the Attack Mitigator component in
+        case of attack.
+
+        Args:
+            request (L3CentralizedattackdetectorMetrics): A L3CentralizedattackdetectorMetrics
+                object with connection features information.
+            context (grpc.ServicerContext): The context of the request.
+
+        Returns:
+            StatusMessage: A response indicating that the information was received and processed.
+        """
+
         # Perform inference with the data sent in the request
         if len(self.active_requests) == 0:
             self.first_batch_request_time = time.time()
@@ -549,14 +623,14 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         self.active_requests.append(request)
 
         if len(self.active_requests) >= BATCH_SIZE:
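+            # Enough requests have accumulated: run inference over the whole batch at once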
-            LOGGER.debug("Performing inference... {}".format(self.replica_uuid))
+            LOGGER.debug("Performing inference... {}".format(self.pod_id))
 
             inference_time_start = time.time()
-            cryptomining_detector_output = self.perform_distributed_inference(self.active_requests)
+            cryptomining_detector_output = self.perform_batch_inference(self.active_requests)
             inference_time_end = time.time()
 
             LOGGER.debug("Inference performed in {} seconds".format(inference_time_end - inference_time_start))
-            logging.info("Inference performed correctly")
+            LOGGER.info("Inference performed correctly")
 
             self.inference_results.append({"output": cryptomining_detector_output, "timestamp": datetime.now()})
             LOGGER.debug("inference_results length: {}".format(len(self.inference_results)))
@@ -564,7 +638,8 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             for i, req in enumerate(self.active_requests):
                 service_id = req.connection_metadata.service_id
 
-                # Check if a request of a new service has been received and, if so, create the monitored KPIs for that service
+                # Check if a request of a new service has been received and, if so, create
+                # the monitored KPIs for that service
                 if service_id not in self.service_ids:
                     self.create_kpis(service_id)
                     self.service_ids.append(service_id)
@@ -576,7 +651,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                 LOGGER.debug("Monitoring KPIs performed in {} seconds".format(monitor_kpis_end - monitor_kpis_start))
                 LOGGER.debug("cryptomining_detector_output: {}".format(cryptomining_detector_output[i]))
 
-                if DEMO_MODE:
+                if TEST_ML_MODEL:
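+                    # TEST_ML_MODEL enables evaluation of the predictions against the configured attack IPs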
                     self.analyze_prediction_accuracy(cryptomining_detector_output[i]["confidence"])
 
                 connection_info = ConnectionInfo(
@@ -613,10 +688,10 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
 
                 # Only notify Attack Mitigator when a cryptomining connection has been detected
                 if cryptomining_detector_output[i]["tag_name"] == "Crypto":
-                    if DEMO_MODE:
+                    if TEST_ML_MODEL:
                         self.attack_connections.append(connection_info)
 
-                    if connection_info.ip_o in ATTACK_IPS or connection_info.ip_d in ATTACK_IPS:
+                    if connection_info.ip_o in self.attack_ips or connection_info.ip_d in self.attack_ips:
                         self.correct_attack_conns += 1
                         self.correct_predictions += 1
                     else:
@@ -629,17 +704,17 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                     LOGGER.debug("Crypto attack detected")
 
                     # Notify the Attack Mitigator component about the attack
-                    logging.info(
+                    LOGGER.info(
                         "Notifying the Attack Mitigator component about the attack in order to block the connection..."
                     )
 
                     try:
-                        logging.info("Sending the connection information to the Attack Mitigator component...")
+                        LOGGER.info("Sending the connection information to the Attack Mitigator component...")
                         message = L3AttackmitigatorOutput(**cryptomining_detector_output[i])
-                        
+
                         am_response = self.attackmitigator_client.PerformMitigation(message)
                         LOGGER.debug("AM response: {}".format(am_response))
-                        
+
                         notification_time_end = time.time()
 
                         self.am_notification_times.append(notification_time_end - notification_time_start)
@@ -670,18 +745,18 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                                 f.write("Std notification time: {}\n".format(std_notification_time))
                                 f.write("Median notification time: {}\n".format(median_notification_time))
 
-                        logging.info("Attack Mitigator notified")
+                        LOGGER.info("Attack Mitigator notified")
 
                     except Exception as e:
-                        logging.error("Error notifying the Attack Mitigator component about the attack: ", e)
-                        logging.error("Couldn't find l3_attackmitigator")
+                        LOGGER.error("Error notifying the Attack Mitigator component about the attack: ", e)
+                        LOGGER.error("Couldn't find l3_attackmitigator")
 
-                        return Empty(message="Attack Mitigator not found")
+                        return StatusMessage(message="Attack Mitigator not found")
                 else:
-                    logging.info("No attack detected")
+                    LOGGER.info("No attack detected")
 
                     if cryptomining_detector_output[i]["tag_name"] != "Crypto":
-                        if connection_info.ip_o not in ATTACK_IPS and connection_info.ip_d not in ATTACK_IPS:
+                        if connection_info.ip_o not in self.attack_ips and connection_info.ip_d not in self.attack_ips:
                             self.correct_predictions += 1
                         else:
                             LOGGER.debug("False negative: {}".format(connection_info))
@@ -705,11 +780,21 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
                 writer = csv.writer(file)
                 writer.writerow(col_values)
 
-            return Empty(message="Ok, metrics processed")
+            return StatusMessage(message="Ok, metrics processed")
 
-        return Empty(message="Ok, information received")
+        return StatusMessage(message="Ok, information received")
 
     def analyze_prediction_accuracy(self, confidence):
+        """
+        Analyzes the prediction accuracy of the Centralized Attack Detector.
+
+        Args:
+            confidence (float): The confidence level of the Cryptomining Attack Detector model.
+
+        Returns:
+            None
+        """
+
         LOGGER.info("Number of Attack Connections Correctly Classified: {}".format(self.correct_attack_conns))
         LOGGER.info("Number of Attack Connections: {}".format(len(self.attack_connections)))
 
@@ -726,7 +811,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             cryptomining_attack_detection_acc = 0
 
         LOGGER.info("Cryptomining Attack Detection Accuracy: {}".format(cryptomining_attack_detection_acc))
-        LOGGER.info("Cryptomining Detector Confidence: {}".format(confidence))
+        LOGGER.info("Cryptomining Attack Detector Confidence: {}".format(confidence))
 
         with open("prediction_accuracy.txt", "a") as f:
             LOGGER.debug("Exporting prediction accuracy and confidence")
@@ -738,12 +823,28 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
             f.write("False Positives: {}\n".format(self.false_positives))
             f.write("True Negatives: {}\n".format(self.total_predictions - len(self.attack_connections)))
             f.write("False Negatives: {}\n".format(self.false_negatives))
-            f.write("Cryptomining Detector Confidence: {}\n\n".format(confidence))
+            f.write("Cryptomining Attack Detector Confidence: {}\n\n".format(confidence))
             f.write("Timestamp: {}\n".format(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
             f.close()
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def AnalyzeBatchConnectionStatistics(self, request, context):
+    def AnalyzeBatchConnectionStatistics(
+        self, request : L3CentralizedattackdetectorBatchInput, context : grpc.ServicerContext
+    ) -> StatusMessage:
+        """
+        Analyzes a batch of connection statistics sent in the request, performs batch inference on the
+        input data using the Cryptomining Attack Detector model to classify the connection as standard
+        traffic or cryptomining attack, and notifies the Attack Mitigator component in case of attack.
+
+        Args:
+            request (L3CentralizedattackdetectorBatchInput): A L3CentralizedattackdetectorBatchInput
+                object with connection features information.
+            context (grpc.ServicerContext): The context of the request.
+
+        Returns:
+            StatusMessage: A StatusMessage indicating that the information was received and processed.
+        """
+
         batch_time_start = time.time()
 
         for metric in request.metrics:
@@ -751,25 +852,50 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
         batch_time_end = time.time()
 
         with open("batch_time.txt", "a") as f:
-            f.write(str(len(request.metrics)) + "\n")
-            f.write(str(batch_time_end - batch_time_start) + "\n\n")
+            f.write(f"{len(request.metrics)}\n")
+            f.write(f"{batch_time_end - batch_time_start}\n\n")
             f.close()
 
-        logging.debug("Metrics: " + str(len(request.metrics)))
-        logging.debug("Batch time: " + str(batch_time_end - batch_time_start))
-
-        return Empty(message="OK, information received.")
+        LOGGER.debug(f"Batch time: {batch_time_end - batch_time_start}")
+        LOGGER.debug("Batch time: {}".format(batch_time_end - batch_time_start))
 
-    """
-    Send features allocated in the metadata of the onnx file to the DAD
-        -output: ONNX metadata as a list of integers
-    """
+        return StatusMessage(message="OK, information received.")
 
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
-    def GetFeaturesIds(self, request: Empty, context):
-        features = AutoFeatures()
+    def GetFeaturesIds(self, request : Empty, context : grpc.ServicerContext) -> AutoFeatures:
+        """
+        Returns a list of feature IDs used by the Cryptomining Attack Detector model.
+
+        Args:
+            request (Empty): An empty request object.
+            context (grpc.ServicerContext): The context of the request.
+
+        Returns:
+            features_ids (AutoFeatures): A list of feature IDs used by the Cryptomining Attack Detector model.
+        """
+
+        features_ids = AutoFeatures()
 
         for feature in self.cryptomining_detector_features_metadata:
-            features.auto_features.append(feature)
+            features_ids.auto_features.append(feature)
+
+        return features_ids
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetAttackIPs(self, request : AttackIPs, context : grpc.ServicerContext) -> Empty:
+        """
+        Sets the list of attack IPs in order to be used to compute the prediction accuracy of the
+        Centralized Attack Detector in case of testing the ML model.
+
+        Args:
+            request (AttackIPs): A list of attack IPs.
+            context (grpc.ServicerContext): The context of the request.
+
+        Returns:
+            empty (Empty): An empty response object.
+        """
+
+        self.attack_ips = request.attack_ips
+        LOGGER.debug(f"Succesfully set attack IPs: {self.attack_ips}")
 
-        return features
+        return Empty()
diff --git a/src/l3_distributedattackdetector/Config.py b/src/l3_distributedattackdetector/Config.py
index e04de0b2622b621fb95f1c382ac3a152836de760..a1419ef09c9b3dcbff5aa576536fae8ffe6bc7a4 100644
--- a/src/l3_distributedattackdetector/Config.py
+++ b/src/l3_distributedattackdetector/Config.py
@@ -18,7 +18,7 @@ import logging
 LOG_LEVEL = logging.WARNING
 
 # gRPC settings
-GRPC_SERVICE_PORT = 10000  # TODO UPM FIXME
+GRPC_SERVICE_PORT = 10000
 GRPC_MAX_WORKERS = 10
 GRPC_GRACE_PERIOD = 60
 
diff --git a/src/l3_distributedattackdetector/README.md b/src/l3_distributedattackdetector/README.md
index d8cac8b72d41c6eb6ce2b2908e6ab7402966ad62..d79563dd8936814132e96aa738216435be44950a 100644
--- a/src/l3_distributedattackdetector/README.md
+++ b/src/l3_distributedattackdetector/README.md
@@ -1,3 +1,3 @@
-# l3_distributedattackdetector
-- Receives packages and process it with TSTAT
-- Functions: ReportSummarizeKpi(KpiList)
+# L3 Distributed Attack Detector
+
+Receives packets and processes them with Tstat to generate traffic snapshot statistics.
diff --git a/src/l3_distributedattackdetector/requirements.in b/src/l3_distributedattackdetector/requirements.in
index a8aba849708799232f6b0732c3661396266da329..64e4aa198bd8b7902c0bb810e5fcd6f108faae6f 100644
--- a/src/l3_distributedattackdetector/requirements.in
+++ b/src/l3_distributedattackdetector/requirements.in
@@ -12,4 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# no extra dependency
+numpy==1.23.*
+asyncio==3.4.3
diff --git a/src/l3_distributedattackdetector/service/__main__.py b/src/l3_distributedattackdetector/service/__main__.py
index 1f558dfb6c271cf63a9e36ae06cb9993f7e49c57..a8f0ac3c4f9737091c2c1a39134b97ee7bd6de7d 100644
--- a/src/l3_distributedattackdetector/service/__main__.py
+++ b/src/l3_distributedattackdetector/service/__main__.py
@@ -13,207 +13,39 @@
 # limitations under the License.
 
 import logging
-import sys 
-import os 
-import time
-import grpc
-from common.proto.l3_centralizedattackdetector_pb2_grpc import (
-    L3CentralizedattackdetectorStub,
-)
-from common.proto.l3_centralizedattackdetector_pb2 import (
-    ModelInput,
-)
+from sys import stdout
+import sys
+from l3_distributedattackdetector import l3_distributedattackdetector
 
-LOGGER = logging.getLogger(__name__)
-TSTAT_DIR_NAME = "piped/"
-JSON_BLANK = {
-    "ip_o": "",  # Client IP
-    "port_o": "",  # Client port
-    "ip_d": "",  # Server ip
-    "port_d": "",  # Server port
-    "flow_id": "",  # Identifier:c_ip,c_port,s_ip,s_port,time_start
-    "protocol": "",  # Connection protocol
-    "time_start": 0,  # Start of connection
-    "time_end": 0,  # Time of last packet
-}
+#  Setup LOGGER
+LOGGER = logging.getLogger("main_dad_LOGGER")
+LOGGER.setLevel(logging.INFO)
+logFormatter = logging.Formatter(fmt="%(levelname)-8s %(message)s")
+consoleHandler = logging.StreamHandler(stdout)
+consoleHandler.setFormatter(logFormatter)
+LOGGER.addHandler(consoleHandler)
 
-def follow(thefile, time_sleep):
-    """
-    Generator function that yields new lines in a file
-    It reads the logfie (the opened file)
-    """
-    # seek the end of the file
-    thefile.seek(0, os.SEEK_END)
+PROFILING = False
 
-    trozo = ""
-    # start infinite loop
-    while True:
-        # read last line of file
-        line = thefile.readline()
-        # sleep if file hasn't been updated
-        if not line:
-            time.sleep(time_sleep)  # FIXME
-            continue
-
-        if line[-1] != "\n":
-            trozo += line
-            # print ("OJO :"+line+":")
-        else:
-            if trozo != "":
-                line = trozo + line
-                trozo = ""
-            yield line
-
-def load_file(dirname=TSTAT_DIR_NAME):
-    """
-    - Client side -
-    """
-    # "/home/dapi/Tstat/TOSHI/tstat/tstat_DRv4/tstat/piped/"
-
-    while True:
-        here = os.path.dirname(os.path.abspath(__file__))
-        tstat_piped = os.path.join(here, dirname)
-        tstat_dirs = os.listdir(tstat_piped)
-        if len(tstat_dirs) > 0:
-            tstat_dirs.sort()
-            new_dir = tstat_dirs[-1]
-            print(new_dir)
-            # print("dir: {0}".format(new_dir))
-            tstat_file = tstat_piped + new_dir + "/log_tcp_temp_complete"
-            print("tstat_file: {0}".format(tstat_file))
-            return tstat_file
-        else:
-            print("No tstat directory!")
-            time.sleep(1)
-
-def process_line(line):
-    """
-    - Preprocessing before a message per line
-    - Avoids crash when nan are found by generating a 0s array
-    - Returns a list of values
-    """
-
-    def makeDivision(i, j):
-        """
-        Helper function
-        """
-        return i / j if (j and type(i) != str and type(j) != str) else 0
-
-    line = line.split(" ")
-    try:
-        n_packets_server, n_packets_client = float(
-            line[16]), float(line[2])
-    except:
-        return [0 for i in range(9)]
-    n_bits_server, n_bits_client = float(line[22]), float(line[8])
-    seconds = float(line[30]) / 1e6  # Duration in ms
-    values = [
-        makeDivision(n_packets_server, seconds),
-        makeDivision(n_packets_client, seconds),
-        makeDivision(n_bits_server, seconds),
-        makeDivision(n_bits_client, seconds),
-        makeDivision(n_bits_server, n_packets_server),
-        makeDivision(n_bits_client, n_packets_client),
-        makeDivision(n_packets_server, n_packets_client),
-        makeDivision(n_bits_server, n_bits_client),
-    ]
-    return values
-
-def open_channel(input_information):
-    with grpc.insecure_channel("localhost:10001") as channel:
-        stub = L3CentralizedattackdetectorStub(channel)
-        response = stub.SendInput(
-            ModelInput(**input_information))
-        LOGGER.debug("Inferencer send_input sent and received: ",
-                        response.message)
-        # response = stub.get_output(Inferencer_pb2.empty(message=""))
-        # print("Inferencer get_output response: \n", response)
-
-def run(time_sleep, max_lines):
-
-    filename = load_file()
-    write_salida = None
-    print(
-        "following: ",
-        filename,
-        " time to wait:",
-        time_sleep,
-        "lineas_tope:",
-        max_lines,
-        "write salida:",
-        write_salida,
-    )
-    logfile = open(filename, "r")
-    # iterate over the generator
-    loglines = follow(logfile, time_sleep)
-    lin = 0
-    ultima_lin = 0
-    last_line = ""
-    cryptos = 0
-    new_connections = {}  # Dict for storing NEW data
-    connections_db = {}  # Dict for storing ALL data
-    print('Reading lines')
-    for line in loglines:
-        print('Received Line')
-        start = time.time()
-        line_id = line.split(" ")
-        conn_id = (line_id[0], line_id[1], line_id[14], line_id[15])
-        new_connections[conn_id] = process_line(line)
-        try:
-            connections_db[conn_id]["time_end"] = time.time()
-        except KeyError:
-            connections_db[conn_id] = JSON_BLANK.copy()
-            connections_db[conn_id]["time_start"] = time.time()
-            connections_db[conn_id]["time_end"] = time.time()
-            connections_db[conn_id]["ip_o"] = conn_id[0]
-            connections_db[conn_id]["port_o"] = conn_id[1]
-            connections_db[conn_id]["flow_id"] = "".join(conn_id)
-            connections_db[conn_id]["protocol"] = "TCP"
-            connections_db[conn_id]["ip_d"] = conn_id[2]
-            connections_db[conn_id]["port_d"] = conn_id[3]
+def main():
+    l3_distributedattackdetector()
 
-        # CRAFT DICT
-        inference_information = {
-            "n_packets_server_seconds": new_connections[conn_id][0],
-            "n_packets_client_seconds": new_connections[conn_id][1],
-            "n_bits_server_seconds": new_connections[conn_id][2],
-            "n_bits_client_seconds": new_connections[conn_id][3],
-            "n_bits_server_n_packets_server": new_connections[conn_id][4],
-            "n_bits_client_n_packets_client": new_connections[conn_id][5],
-            "n_packets_server_n_packets_client": new_connections[conn_id][6],
-            "n_bits_server_n_bits_client": new_connections[conn_id][7],
-            "ip_o": connections_db[conn_id]["ip_o"],
-            "port_o": connections_db[conn_id]["port_o"],
-            "ip_d": connections_db[conn_id]["ip_d"],
-            "port_d": connections_db[conn_id]["port_d"],
-            "flow_id": connections_db[conn_id]["flow_id"],
-            "protocol": connections_db[conn_id]["protocol"],
-            "time_start": connections_db[conn_id]["time_start"],
-            "time_end": connections_db[conn_id]["time_end"],
-        }
 
-        # SEND MSG
-        try:
-            open_channel(inference_information)
-        except:
-            LOGGER.info("Centralized Attack Mitigator is not up")
+if __name__ == "__main__":
+    if PROFILING:
+        import cProfile, pstats, io
 
-        if write_salida:
-            print(line, end="")
-            sys.stdout.flush()
-        lin += 1
-        if lin >= max_lines:
-            break
-        elif lin == 1:
-            print("primera:", ultima_lin)
+        pr = cProfile.Profile()
+        pr.enable()
 
-        end = time.time() - start
-        print(end)
+    main()
 
+    if PROFILING:
+        pr.disable()
+        s = io.StringIO()
+        sortby = "cumulative"
+        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+        ps.print_stats()
+        LOGGER.info(s.getvalue())
 
-def main():
-    logging.basicConfig()
-    run(5, 70)
-    
-if __name__ == '__main__':
-    sys.exit(main())
+    sys.exit(0)
\ No newline at end of file
diff --git a/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py b/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py
new file mode 100644
index 0000000000000000000000000000000000000000..357f44a9ab2037438252fb0ca40b1a7dc3c74c54
--- /dev/null
+++ b/src/l3_distributedattackdetector/service/l3_distributedattackdetector.py
@@ -0,0 +1,376 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio
+import grpc
+import logging
+import numpy as np
+import os
+import signal
+import time
+from sys import stdout
+from common.proto.context_pb2 import (
+    Empty,
+    ServiceTypeEnum,
+    ContextId,
+)
+from common.proto.context_pb2_grpc import ContextServiceStub
+from common.proto.l3_centralizedattackdetector_pb2 import (
+    L3CentralizedattackdetectorMetrics,
+    L3CentralizedattackdetectorBatchInput,
+    ConnectionMetadata,
+    Feature,
+)
+from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorStub
+
+#  Setup LOGGER
+LOGGER = logging.getLogger("dad_LOGGER")
+LOGGER.setLevel(logging.INFO)
+logFormatter = logging.Formatter(fmt="%(levelname)-8s %(message)s")
+consoleHandler = logging.StreamHandler(stdout)
+consoleHandler.setFormatter(logFormatter)
+LOGGER.addHandler(consoleHandler)
+
+TSTAT_DIR_NAME = "piped/"
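+# Address of the Centralized Attack Detector gRPC endpoint (environment-specific; adjust for your deployment)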
+CENTRALIZED_ATTACK_DETECTOR = "192.168.165.78:10001"
+JSON_BLANK = {
+    "ip_o": "",  # Client IP
+    "port_o": "",  # Client port
+    "ip_d": "",  # Server ip
+    "port_d": "",  # Server port
+    "flow_id": "",  # Identifier:c_ip,c_port,s_ip,s_port,time_start
+    "protocol": "",  # Connection protocol
+    "time_start": 0.0,  # Start of connection
+    "time_end": 0.0,  # Time of last packet
+}
+
+STOP = False
+IGNORE_FIRST_LINE_TSTAT = True
+
+CONTEXT_ID = "admin"
+CONTEXT_CHANNEL = "192.168.165.78:1010"
+PROFILING = False
+SEND_DATA_IN_BATCHES = False
+BATCH_SIZE = 10
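+# Ground-truth list of known attacker IPs, used only to evaluate prediction accuracy when testing the ML model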
+ATTACK_IPS = ["37.187.95.110", "91.121.140.167", "94.23.23.52", "94.23.247.226", "149.202.83.171"]
+
+class l3_distributedattackdetector():
+    def __init__(self):
+        LOGGER.info("Creating Distributed Attack Detector")
+        
+        self.feature_ids = []
+        
+        self.cad_features = {}
+        self.conn_id = ()
+        
+        self.connections_dict = {} # Dict for storing ALL data
+        self.new_connections = {} # Dict for storing NEW data
+        
+        signal.signal(signal.SIGINT, self.handler)
+        
+        with grpc.insecure_channel(CENTRALIZED_ATTACK_DETECTOR) as channel:
+            self.cad = L3CentralizedattackdetectorStub(channel)
+            LOGGER.info("Connected to the centralized attack detector")
+
+            LOGGER.info("Obtaining features...")
+            self.feature_ids = self.get_features_ids()
+            LOGGER.info("Features Ids.: {:s}".format(str(self.feature_ids)))
+            
+            asyncio.run(self.process_traffic())
+            
+        
+    def handler(self, signum, frame):
+        # Signal handlers receive the signal number and stack frame; STOP is a
+        # module-level flag, so it must be declared global before assignment
+        global STOP
+
+        if STOP:
+            exit()
+
+        STOP = True
+
+        LOGGER.info("Gracefully stopping...")
+    
+    def follow(self, thefile, time_sleep):
+        """
+        Generator function that yields new lines in a file
+        It reads the logfie (the opened file)
+        """
+        # seek the end of the file
+        # thefile.seek(0, os.SEEK_END)
+
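+        # "trozo" (Spanish for "chunk") accumulates partial lines until a complete line ending in a newline is read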
+        trozo = ""
+
+        # start infinite loop
+        while True:
+            # read last line of file
+            line = thefile.readline()
+
+            # sleep if file hasn't been updated
+            if not line:
+                time.sleep(time_sleep)
+                continue
+            if line[-1] != "\n":
+                trozo += line
+            else:
+                if trozo != "":
+                    line = trozo + line
+                    trozo = ""
+                yield line
+
+
+    def load_file(self, dirname=TSTAT_DIR_NAME):  # - Client side -
+        while True:
+            here = os.path.dirname(os.path.abspath(__file__))
+            tstat_piped = os.path.join(here, dirname)
+            tstat_dirs = os.listdir(tstat_piped)
+            if len(tstat_dirs) > 0:
+                tstat_dirs.sort()
+                new_dir = tstat_dirs[-1]
+                tstat_file = tstat_piped + new_dir + "/log_tcp_temp_complete"
+                LOGGER.info("Following: {:s}".format(str(tstat_file)))
+                return tstat_file
+            else:
+                LOGGER.info("No Tstat directory!")
+                time.sleep(5)
+
+
+    def process_line(self, line):
+        """
+        - Preprocessing before a message per line
+        - Avoids crash when nan are found by generating a 0s array
+        - Returns a list of values
+        """
+        line = line.split(" ")
+
+        try:
+            values = []
+            for feature_id in self.feature_ids:
+                feature_id = int(feature_id)
+                feature = feature_id - 1
+                values.append(float(line[feature]))
+        except IndexError:
+            print("IndexError: {0}".format(line))
+
+        return values
+
+
+    def get_service_ids(self, context_id_str):
+        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
+            stub = ContextServiceStub(channel)
+            context_id = ContextId()
+            context_id.context_uuid.uuid = context_id_str
+            return stub.ListServiceIds(context_id)
+
+
+    def get_services(self, context_id_str):
+        with grpc.insecure_channel(CONTEXT_CHANNEL) as channel:
+            stub = ContextServiceStub(channel)
+            context_id = ContextId()
+            context_id.context_uuid.uuid = context_id_str
+            return stub.ListServices(context_id)
+
+
+    def get_service_id(self, context_id):
+        service_id_list = self.get_service_ids(context_id)
+        service_id = None
+        for s_id in service_id_list.service_ids:
+            if (
+                s_id.service_uuid.uuid == "0eaa0752-c7b6-4c2e-97da-317fbfee5112"
+            ):  # TODO: Change this identifier to the L3VPN service identifier with the real router for the demo v2
+                service_id = s_id
+                break
+
+        return service_id
+
+
+    def get_service_id2(self, context_id):
+        service_list = self.get_services(context_id)
+        service_id = None
+        for s in service_list.services:
+            if s.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+                service_id = s.service_id
+                break
+        return service_id
+
+
+    def get_endpoint_id(self, context_id):
+        service_list = self.get_services(context_id)
+        endpoint_id = None
+        for s in service_list.services:
+            if s.service_type == ServiceTypeEnum.SERVICETYPE_L3NM:
+                endpoint_id = s.service_endpoint_ids[0]
+                break
+        return endpoint_id
+
+
+    def get_features_ids(self):
+        return self.cad.GetFeaturesIds(Empty()).auto_features
+
+
+    def check_types(self):
+        for feature in self.cad_features["features"]:
+            assert isinstance(feature, float)
+
+        assert isinstance(self.cad_features["connection_metadata"]["ip_o"], str)
+        assert isinstance(self.cad_features["connection_metadata"]["port_o"], str)
+        assert isinstance(self.cad_features["connection_metadata"]["ip_d"], str)
+        assert isinstance(self.cad_features["connection_metadata"]["port_d"], str)
+        assert isinstance(self.cad_features["connection_metadata"]["flow_id"], str)
+        assert isinstance(self.cad_features["connection_metadata"]["protocol"], str)
+        assert isinstance(self.cad_features["connection_metadata"]["time_start"], float)
+        assert isinstance(self.cad_features["connection_metadata"]["time_end"], float)
+
+
+    def insert_connection(self):
+        try:
+            self.connections_dict[self.conn_id]["time_end"] = time.time()
+        except KeyError:
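+            # First time this connection is seen: initialize its metadata entry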
+            self.connections_dict[self.conn_id] = JSON_BLANK.copy()
+            self.connections_dict[self.conn_id]["time_start"] = time.time()
+            self.connections_dict[self.conn_id]["time_end"] = time.time()
+            self.connections_dict[self.conn_id]["ip_o"] = self.conn_id[0]
+            self.connections_dict[self.conn_id]["port_o"] = self.conn_id[1]
+            self.connections_dict[self.conn_id]["flow_id"] = ":".join(self.conn_id)
+            self.connections_dict[self.conn_id]["service_id"] = self.get_service_id2(CONTEXT_ID)
+            self.connections_dict[self.conn_id]["endpoint_id"] = self.get_endpoint_id(CONTEXT_ID)
+            self.connections_dict[self.conn_id]["protocol"] = "TCP"
+            self.connections_dict[self.conn_id]["ip_d"] = self.conn_id[2]
+            self.connections_dict[self.conn_id]["port_d"] = self.conn_id[3]
+
+
+    def check_if_connection_is_attack(self):
+        if self.conn_id[0] in ATTACK_IPS or self.conn_id[2] in ATTACK_IPS:
+            LOGGER.info("Attack detected. Origin: {0}, destination: {1}".format(self.conn_id[0], self.conn_id[2]))
+
+
+    def create_cad_features(self):
+        self.cad_features = {
+            "features": self.new_connections[self.conn_id][0:10],
+            "connection_metadata": {
+                "ip_o": self.connections_dict[self.conn_id]["ip_o"],
+                "port_o": self.connections_dict[self.conn_id]["port_o"],
+                "ip_d": self.connections_dict[self.conn_id]["ip_d"],
+                "port_d": self.connections_dict[self.conn_id]["port_d"],
+                "flow_id": self.connections_dict[self.conn_id]["flow_id"],
+                "service_id": self.connections_dict[self.conn_id]["service_id"],
+                "endpoint_id": self.connections_dict[self.conn_id]["endpoint_id"],
+                "protocol": self.connections_dict[self.conn_id]["protocol"],
+                "time_start": self.connections_dict[self.conn_id]["time_start"],
+                "time_end": self.connections_dict[self.conn_id]["time_end"],
+            },
+        }
+
+
+    async def send_batch_async(self, metrics_list_pb):
+        loop = asyncio.get_running_loop()
+
+        # Create metrics batch
+        metrics_batch = L3CentralizedattackdetectorBatchInput()
+        metrics_batch.metrics.extend(metrics_list_pb)
+
+        # Send batch
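+        # The synchronous gRPC stub call is dispatched to the default thread-pool executor so it does not block the event loop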
+        future = loop.run_in_executor(
+            None, self.cad.AnalyzeBatchConnectionStatistics, metrics_batch
+        )
+
+        try:
+            await future
+        except Exception as e:
+            LOGGER.error(f"Error sending batch: {e}")
+
+
+    async def send_data(self, metrics_list_pb, send_data_times):
+        # Send to CAD
+        if SEND_DATA_IN_BATCHES:
+            if len(metrics_list_pb) == BATCH_SIZE:
+                send_data_time_start = time.time()
+                await self.send_batch_async(metrics_list_pb)
+                metrics_list_pb = []
+
+                send_data_time_end = time.time()
+                send_data_time = send_data_time_end - send_data_time_start
+                send_data_times = np.append(send_data_times, send_data_time)
+
+        else:
+            send_data_time_start = time.time()
+            self.cad.AnalyzeConnectionStatistics(metrics_list_pb[-1])
+
+            # Drop the metric that was just sent so the list does not grow unboundedly
+            metrics_list_pb = []
+
+            send_data_time_end = time.time()
+            send_data_time = send_data_time_end - send_data_time_start
+            send_data_times = np.append(send_data_times, send_data_time)
+
+        return metrics_list_pb, send_data_times
+
+
+    async def process_traffic(self):
+        LOGGER.info("Loading Tstat log file...")
+        logfile = open(self.load_file(), "r")
+
+        LOGGER.info("Following Tstat log file...")
+        loglines = self.follow(logfile, 5)
+
+        process_time = []
+        num_lines = 0
+
+        send_data_times = np.array([])
+        metrics_list_pb = []
+
+        LOGGER.info("Starting to process data...")
+        
+        index = 0
+        while True:
+            line = next(loglines, None)
+
+            while line is None:
+                LOGGER.info("Waiting for new data...")
+                # asyncio.sleep yields control so pending executor calls can progress while polling
+                await asyncio.sleep(1 / 100)
+                line = next(loglines, None)
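+            # Tstat writes a header as the first line of its log; skip it when IGNORE_FIRST_LINE_TSTAT is set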
+            if index == 0 and IGNORE_FIRST_LINE_TSTAT:
+                index = index + 1
+                continue
+            if STOP:
+                break
+
+            num_lines += 1
+            start = time.time()
+            line_id = line.split(" ")
+            self.conn_id = (line_id[0], line_id[1], line_id[14], line_id[15])
+            self.new_connections[self.conn_id] = self.process_line(line)
+
+            self.check_if_connection_is_attack()
+
+            self.insert_connection()
+
+            self.create_cad_features()
+            
+            self.check_types()
+            
+            connection_metadata = ConnectionMetadata(**self.cad_features["connection_metadata"])
+            metrics = L3CentralizedattackdetectorMetrics()
+
+            for feature in self.cad_features["features"]:
+                feature_obj = Feature()
+                feature_obj.feature = feature
+                metrics.features.append(feature_obj)
+
+            metrics.connection_metadata.CopyFrom(connection_metadata)
+            metrics_list_pb.append(metrics)
+
+            metrics_list_pb, send_data_times = await self.send_data(metrics_list_pb, send_data_times)
+
+            index = index + 1
+            
+            process_time.append(time.time() - start)
+            
+            if num_lines % 10 == 0:
+                LOGGER.info(f"Number of lines: {num_lines} - Average processing time: {sum(process_time) / num_lines}")
\ No newline at end of file
diff --git a/src/l3_distributedattackdetector/service/tstat b/src/l3_distributedattackdetector/service/tstat
deleted file mode 100644
index 06c7fb082e12c8392b71d0ec2f7d74827d30e4a3..0000000000000000000000000000000000000000
Binary files a/src/l3_distributedattackdetector/service/tstat and /dev/null differ
diff --git a/src/service/client/TEServiceClient.py b/src/service/client/TEServiceClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..19ca95bceb285c03df635859728ecf15640d8438
--- /dev/null
+++ b/src/service/client/TEServiceClient.py
@@ -0,0 +1,67 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.Constants import ServiceNameEnum
+from common.Settings import get_service_host, get_service_port_grpc
+from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatus
+from common.proto.te_pb2_grpc import TEServiceStub
+from common.tools.client.RetryDecorator import retry, delay_exponential
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+LOGGER = logging.getLogger(__name__)
+MAX_RETRIES = 15
+DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0)
+RETRY_DECORATOR = retry(max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, prepare_method_name='connect')
+
+class TEServiceClient:
+    def __init__(self, host=None, port=None):
+        if not host: host = get_service_host(ServiceNameEnum.TE)
+        if not port: port = get_service_port_grpc(ServiceNameEnum.TE)
+        self.endpoint = '{:s}:{:s}'.format(str(host), str(port))
+        LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint)))
+        self.channel = None
+        self.stub = None
+        self.connect()
+        LOGGER.debug('Channel created')
+
+    def connect(self):
+        self.channel = grpc.insecure_channel(self.endpoint)
+        self.stub = TEServiceStub(self.channel)
+
+    def close(self):
+        if self.channel is not None: self.channel.close()
+        self.channel = None
+        self.stub = None
+
+    @RETRY_DECORATOR
+    def RequestLSP(self, request : Service) -> ServiceStatus:
+        LOGGER.debug('RequestLSP request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.RequestLSP(request)
+        LOGGER.debug('RequestLSP result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def UpdateLSP(self, request : ServiceId) -> ServiceStatus:
+        LOGGER.debug('UpdateLSP request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.UpdateLSP(request)
+        LOGGER.debug('UpdateLSP result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
+
+    @RETRY_DECORATOR
+    def DeleteLSP(self, request : ServiceId) -> Empty:
+        LOGGER.debug('DeleteLSP request: {:s}'.format(grpc_message_to_json_string(request)))
+        response = self.stub.DeleteLSP(request)
+        LOGGER.debug('DeleteLSP result: {:s}'.format(grpc_message_to_json_string(response)))
+        return response
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index 141c6cb3ce84709d601b0510a61e1c497aa3f598..55d0b593b96d8e07bd8a12d76d7deafe2e42ff8f 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -27,6 +27,7 @@ from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_s
 from context.client.ContextClient import ContextClient
 from pathcomp.frontend.client.PathCompClient import PathCompClient
 from service.service.tools.ConnectionToString import connection_to_string
+from service.client.TEServiceClient import TEServiceClient
 from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory
 from .task_scheduler.TaskScheduler import TasksScheduler
 from .tools.GeodesicDistance import gps_distance
@@ -97,13 +98,14 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             context_client, request.service_id, rw_copy=False,
             include_config_rules=True, include_constraints=True, include_endpoint_ids=True)
 
-        location_aware = False
+        gps_location_aware = False
         for constraint in request.service_constraints:
             if constraint.WhichOneof('constraint') != 'endpoint_location': continue
-            location_aware = True
+            if constraint.endpoint_location.location.WhichOneof('location') != 'gps_position': continue
+            gps_location_aware = True
 
-        LOGGER.debug('location_aware={:s}'.format(str(location_aware)))
-        if _service is not None and location_aware:
+        LOGGER.debug('gps_location_aware={:s}'.format(str(gps_location_aware)))
+        if _service is not None and gps_location_aware:
             LOGGER.debug('  Removing previous service')
             tasks_scheduler = TasksScheduler(self.service_handler_factory)
             tasks_scheduler.compose_from_service(_service, is_delete=True)
@@ -116,6 +118,28 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             service.service_type = request.service_type                                     # pylint: disable=no-member
         service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED     # pylint: disable=no-member
 
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_TE:
+            # TE service:
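+            # (store the requested service, delegate LSP setup to the TE service, and mark it ACTIVE on success)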
+            context_client.SetService(request)
+
+            te_service_client = TEServiceClient()
+            service_status = te_service_client.RequestLSP(service)
+
+            if service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE:
+                _service : Optional[Service] = get_service_by_id(
+                    context_client, request.service_id, rw_copy=True,
+                    include_config_rules=False, include_constraints=False, include_endpoint_ids=False)
+                _service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
+                service_id = context_client.SetService(_service)
+                return service_id
+            else:
+                MSG = 'RequestLSP for Service({:s}) returned ServiceStatus({:s})'
+                context_uuid = request.service_id.context_id.context_uuid.uuid
+                service_uuid = request.service_id.service_uuid.uuid
+                service_key = '{:s}/{:s}'.format(context_uuid, service_uuid)
+                str_service_status = ServiceStatusEnum.Name(service_status.service_status)
+                raise Exception(MSG.format(service_key, str_service_status))
+
         del service.service_endpoint_ids[:] # pylint: disable=no-member
         for endpoint_id in request.service_endpoint_ids:
             service.service_endpoint_ids.add().CopyFrom(endpoint_id)    # pylint: disable=no-member
@@ -213,6 +237,14 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL
         context_client.SetService(service)
 
+        if service.service_type == ServiceTypeEnum.SERVICETYPE_TE:
+            # TE service
+            te_service_client = TEServiceClient()
+            te_service_client.DeleteLSP(request)
+            context_client.RemoveService(request)
+            return Empty()
+
+        # Normal service
         # Feed TaskScheduler with this service and the sub-services and sub-connections related to this service.
         # TaskScheduler identifies inter-dependencies among them and produces a schedule of tasks (an ordered list of
         # tasks to be executed) to implement the requested delete operation.
diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py
index f86412a8c736fda4d2fff2b485453cc3bf1ce0b1..1b22c5c42e908e9b9455358edd2abf54442628f5 100644
--- a/src/service/service/service_handler_api/FilterFields.py
+++ b/src/service/service/service_handler_api/FilterFields.py
@@ -23,7 +23,8 @@ SERVICE_TYPE_VALUES = {
     ServiceTypeEnum.SERVICETYPE_UNKNOWN,
     ServiceTypeEnum.SERVICETYPE_L3NM,
     ServiceTypeEnum.SERVICETYPE_L2NM,
-    ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
+    ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE,
+    ServiceTypeEnum.SERVICETYPE_TE,
 }
 
 DEVICE_DRIVER_VALUES = {
diff --git a/src/te/.dockerignore b/src/te/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..558d2f3ece10e09c6a5ecd710c21e3f727bdd25c
--- /dev/null
+++ b/src/te/.dockerignore
@@ -0,0 +1,4 @@
+Dockerfile
+_build
+README.md
+.tool-versions
diff --git a/src/te/.gitignore b/src/te/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..eeb7cfc4aefeeb2f4fa86e208119ebc360c3f7af
--- /dev/null
+++ b/src/te/.gitignore
@@ -0,0 +1,21 @@
+.tool-versions
+.rebar3
+_*
+.eunit
+*.o
+*.beam
+*.plt
+*.swp
+*.swo
+.erlang.cookie
+ebin
+log
+erl_crash.dump
+.rebar
+logs
+_build
+.idea
+*.iml
+rebar3.crashdump
+*~
+config/dev.config
diff --git a/src/te/Dockerfile b/src/te/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..aaca9fe87ad330c2d43406d5b5df2a585617d937
--- /dev/null
+++ b/src/te/Dockerfile
@@ -0,0 +1,59 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Multi-stage Docker image build
+
+# Build stage 0
+FROM erlang:24.3-alpine
+
+RUN apk add --no-cache bash git
+
+RUN mkdir /var/teraflow
+WORKDIR /var/teraflow
+
+COPY proto proto
+RUN bash -c proto/generate_code_erlang.sh
+RUN mkdir src
+COPY src/te src/te
+WORKDIR src/te
+RUN rebar3 as prod release
+
+# Build stage 1
+FROM alpine
+
+# Install some libs
+RUN apk add --no-cache libgcc libstdc++ && \
+    apk add --no-cache openssl && \
+    apk add --no-cache libcrypto1.1 && \
+    apk add --no-cache ncurses-libs
+
+# Install the released application
+COPY --from=0 /var/teraflow/src/te/_build/prod/rel/tfte /tfte
+
+# Expose relevant ports
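+# (10030 is the TE gRPC port; 4189 is the standard PCEP port defined in RFC 5440)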
+EXPOSE 10030
+EXPOSE 4189
+
+ARG ERLANG_LOGGER_LEVEL_DEFAULT=debug
+ARG ERLANG_COOKIE_DEFAULT=tfte-unsafe-cookie
+ARG ERLANG_NODE_IP_DEFAULT=127.0.0.1
+ARG ERLANG_NODE_NAME_DEFAULT=tfte
+
+ENV ERLANG_LOGGER_LEVEL=$ERLANG_LOGGER_LEVEL_DEFAULT
+ENV ERLANG_COOKIE=$ERLANG_COOKIE_DEFAULT
+ENV ERLANG_NODE_IP=$ERLANG_NODE_IP_DEFAULT
+ENV ERLANG_NODE_NAME=$ERLANG_NODE_NAME_DEFAULT
+
+ENTRYPOINT ["/tfte/bin/tfte"]
+CMD ["foreground"]
diff --git a/src/te/README.md b/src/te/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5da478e61ca7f88182e757c80ea7b2916de9998d
--- /dev/null
+++ b/src/te/README.md
@@ -0,0 +1,81 @@
+TeraFlow Traffic Engineering Service
+====================================
+
+This service is meant as an example of a TeraFlow service written in Erlang.
+
+The Traffic Engineering service is tested on Ubuntu 20.04. Follow the instructions below to build, test, and run this service on your local environment.
+
+
+## Build
+
+First, the TeraFlow protocol buffer code must be generated:
+
+    $ ../../proto/generate_code_erlang.sh
+
+Then the TE service can be built:
+
+    $ rebar3 compile
+
+
+## Execute Unit Tests
+
+    $ rebar3 eunit
+
+
+## Run Service Console
+
+First, you need to create a configuration file, if not already done, and customize it if required:
+
+    $ cp config/dev.config.template config/dev.config
+
+Then you can start the service in console mode:
+
+    $ rebar3 shell
+
+
+## Docker
+
+### Build Image
+
+The Docker image must be built from the root of the TeraFlow project:
+
+    $ docker build -t te:dev -f src/te/Dockerfile .
+
+
+### Run a shell from inside the container
+
+    $ docker run -ti --rm --entrypoint sh te:dev
+
+
+### Run Docker Container
+
+    $ docker run -d --name te --init te:dev
+
+
+### Open a Console to a Docker Container's Service
+
+    $ docker exec -it te /tfte/bin/tfte remote_console
+
+
+### Show Logs
+
+    $ docker logs te
+
+
+## Kubernetes
+
+### Open a Console
+
+    $ kubectl --namespace tfs exec -ti $(kubectl --namespace tfs get pods --selector=app=teservice -o name) -c server -- /tfte/bin/tfte remote_console
+
+
+### Show Logs
+
+    $ kubectl --namespace tfs logs $(kubectl --namespace tfs get pods --selector=app=teservice -o name) -c server
+
+
+## Teraflow
+
+To build and deploy the TE service as part of TeraFlow, the following line must be added or uncommented in your `my_deploy.sh`:
+
+    export TFS_COMPONENTS="${TFS_COMPONENTS} te"
diff --git a/src/te/apps/epce/src/epce.app.src b/src/te/apps/epce/src/epce.app.src
new file mode 100644
index 0000000000000000000000000000000000000000..13324fd2fd68567623543b16affdd5b4a0241ad4
--- /dev/null
+++ b/src/te/apps/epce/src/epce.app.src
@@ -0,0 +1,30 @@
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+{application, epce,
+ [{description, "An Erlang PCE"},
+  {vsn, "0.1.0"},
+  {registered, []},
+  {mod, {epce_app, []}},
+  {applications,
+   [kernel,
+    stdlib,
+    pcep_server
+   ]},
+  {env,[]},
+  {modules, []},
+
+  {licenses, ["Apache 2.0"]},
+  {links, []}
+ ]}.
diff --git a/src/te/apps/epce/src/epce_app.erl b/src/te/apps/epce/src/epce_app.erl
new file mode 100644
index 0000000000000000000000000000000000000000..19f574f38eb93080beca9b3cc7c47a78994fce57
--- /dev/null
+++ b/src/te/apps/epce/src/epce_app.erl
@@ -0,0 +1,34 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(epce_app).
+
+-behaviour(application).
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Behaviour application functions
+-export([start/2, stop/1]).
+
+
+%%% BEHAVIOUR application FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start(_StartType, _StartArgs) ->
+    epce_sup:start_link().
+
+stop(_State) ->
+    ok.
diff --git a/src/te/apps/epce/src/epce_pcep_server_handler.erl b/src/te/apps/epce/src/epce_pcep_server_handler.erl
new file mode 100644
index 0000000000000000000000000000000000000000..dea88b5d0b253573b956aabcf240ffed0caf15be
--- /dev/null
+++ b/src/te/apps/epce/src/epce_pcep_server_handler.erl
@@ -0,0 +1,90 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(epce_pcep_server_handler).
+
+-behaviour(gen_pcep_handler).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+-include_lib("pcep_codec/include/pcep_codec_te.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API functions
+
+% Behaviour gen_pcep_handler functions
+-export([init/1]).
+-export([opened/4]).
+-export([flow_added/2]).
+-export([flow_initiated/2]).
+-export([ready/1]).
+-export([request_route/2]).
+-export([flow_delegated/2]).
+-export([flow_status_changed/3]).
+-export([terminate/2]).
+
+
+%%% RECORDS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(state, {}).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%% BEHAVIOUR gen_pcep_handler FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    {ok, #{}, #state{}}.
+
+opened(Id, Caps, Sess, State) ->
+    case epce_server:session_opened(Id, Caps, Sess) of
+        ok -> {ok, State};
+        {error, Reason} -> {error, Reason}
+    end.
+
+flow_added(Flow, State) ->
+    case epce_server:flow_added(Flow) of
+        {error, _Reason} = Error -> Error;
+        ok -> {ok, State}
+    end.
+
+flow_initiated(Flow, State) ->
+    ok = epce_server:flow_initiated(Flow),
+    {ok, State}.
+
+ready(State) ->
+    {ok, State}.
+
+request_route(RouteReq, State) ->
+    case epce_server:request_route(RouteReq) of
+        {error, _Reason} = Error -> Error;
+        {ok, Route} -> {ok, Route, State}
+    end.
+
+flow_delegated(_Flow, State) ->
+    {ok, State}.
+
+flow_status_changed(FlowId, NewStatus, State) ->
+    epce_server:flow_status_changed(FlowId, NewStatus),
+    {ok, State}.
+
+terminate(_Reason, _State) ->
+    ok.
diff --git a/src/te/apps/epce/src/epce_server.erl b/src/te/apps/epce/src/epce_server.erl
new file mode 100644
index 0000000000000000000000000000000000000000..d1d86b5766a3f8d8fe31b873dddbf7c8076ca0f5
--- /dev/null
+++ b/src/te/apps/epce/src/epce_server.erl
@@ -0,0 +1,346 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(epce_server).
+
+-behaviour(gen_server).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+-include_lib("pcep_server/include/pcep_server.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API Functions
+-export([start_link/0]).
+-export([get_flows/0]).
+-export([update_flow/2]).
+-export([initiate_flow/4]).
+
+% Handler Functions
+-export([session_opened/3]).
+-export([flow_added/1]).
+-export([flow_initiated/1]).
+-export([request_route/1]).
+-export([flow_status_changed/2]).
+
+% Behaviour gen_server functions
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_continue/2]).
+-export([handle_info/2]).
+-export([code_change/3]).
+-export([terminate/2]).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(LARGE_TIMEOUT, infinity).
+
+
+%%% RECORDS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
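+% #sess{}: one record per open PCEP session, indexed both by session id
+% (#state.sessions) and by session process pid (#state.sess_pids) so that
+% monitor 'DOWN' messages can be mapped back to the session.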
+-record(sess, {
+        id,
+        caps,
+        monref,
+        pid
+}).
+
+-record(state, {
+        bouncer,
+        sessions = #{},
+        sess_pids = #{},
+        flows = #{}
+}).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+get_flows() ->
+    gen_server:call(?MODULE, get_flows).
+
+update_flow(FlowId, LabelStack) ->
+    gen_server:call(?MODULE, {update_flow, FlowId, LabelStack}).
+
+initiate_flow(Name, From, To, BindingLabel) ->
+    gen_server:call(?MODULE, {initiate_flow, Name, From, To,
+                              BindingLabel}, ?LARGE_TIMEOUT).
+
+
+%%% HANDLER FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+session_opened(Id, Caps, Pid) ->
+    gen_server:call(?MODULE, {session_opened, Id, Caps, Pid}).
+
+flow_added(Flow) ->
+    gen_server:call(?MODULE, {flow_added, Flow}).
+
+flow_initiated(Flow) ->
+    gen_server:call(?MODULE, {flow_initiated, Flow}).
+
+request_route(RouteReq) ->
+    gen_server:call(?MODULE, {request_route, RouteReq}).
+
+flow_status_changed(FlowId, NewStatus) ->
+    gen_server:call(?MODULE, {flow_status_changed, FlowId, NewStatus}).
+
+
+%%% BEHAVIOUR gen_server FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    {ok, bouncer_start(#state{})}.
+
+handle_call(get_flows, _From, #state{flows = Flows} = State) ->
+    {reply, {ok, Flows}, State};
+handle_call({update_flow, FlowId, Labels}, From,
+            #state{flows = Flows, sessions = SessMap} = State) ->
+    case maps:find(FlowId, Flows) of
+        error -> {reply, {error, flow_not_found}, State};
+        {ok, #{owner := Owner, route := #{} = R}} ->
+            case maps:find(Owner, SessMap) of
+                error -> {reply, {error, session_not_found}, State};
+                {ok, #sess{pid = Pid}} ->
+                    #{source := S, destination := D, constraints := C} = R,
+                    ReqRoute = routereq_from_labels(S, D, C, Labels),
+                    session_update_flow(State, Pid, FlowId, ReqRoute, From),
+                    {noreply, State}
+            end
+    end;
+handle_call({initiate_flow, Name, FromKey, ToKey, Binding}, From,
+            #state{sessions = SessMap} = State) ->
+    case {pcc_address(FromKey), pcc_address(ToKey)} of
+        {{error, Reason}, _} ->
+            {reply, {error, Reason}, State};
+        {_, {error, Reason}} ->
+            {reply, {error, Reason}, State};
+        {{ok, FromAddr}, {ok, ToAddr}} ->
+            case maps:find(FromAddr, SessMap) of
+                error -> {reply, {error, session_not_found}, State};
+                {ok, #sess{pid = Pid}} ->
+                    case compute_path(FromAddr, ToAddr) of
+                        {error, Reason} ->
+                            {reply, {error, Reason}, State};
+                        {ok, Labels} ->
+                            InitRoute = routeinit_from_labels(Name, FromAddr,
+                                            ToAddr, [], Binding, Labels),
+                            session_initiate_flow(State, Pid, InitRoute, From),
+                            {noreply, State}
+                    end
+            end
+    end;
+handle_call({session_opened, Id, Caps, Pid}, _From,
+            #state{sessions = SessMap, sess_pids = SessPids} = State) ->
+    logger:debug("Session with capabilities ~w open to ~w", [Caps, Id]),
+    case maps:find(Id, SessMap) of
+        {ok, _} -> {reply, {error, already_opened}, State};
+        error ->
+            MonRef = erlang:monitor(process, Pid),
+            SessRec = #sess{id = Id, caps = Caps, monref = MonRef, pid = Pid},
+            {reply, ok, State#state{
+                sessions = SessMap#{Id => SessRec},
+                sess_pids = SessPids#{Pid => SessRec}
+            }}
+    end;
+handle_call({flow_added, #{id := Id, route := Route} = Flow},
+            _From, #state{flows = Flows} = State) ->
+    logger:debug("Flow ~w with route ~w added", [Id, route_to_labels(Route)]),
+    {reply, ok, State#state{flows = Flows#{Id => Flow}}};
+handle_call({flow_initiated, #{id := Id, route := Route} = Flow},
+            _From, #state{flows = Flows} = State) ->
+    logger:debug("Flow ~w with route ~p initiated",
+                 [Id, route_to_labels(Route)]),
+    {reply, ok, State#state{flows = Flows#{Id => Flow}}};
+handle_call({request_route, RouteReq}, _From, State) ->
+    logger:info("Route from ~w to ~w requested",
+                [maps:get(source, RouteReq), maps:get(destination, RouteReq)]),
+    #{source := S, destination := D, constraints := C} = RouteReq,
+    case compute_path(S, D) of
+        {error, _Reason} = Error ->
+            {reply, Error, State};
+        {ok, Labels} ->
+            Route = routereq_from_labels(S, D, C, Labels),
+            {reply, {ok, Route}, State}
+    end;
+handle_call({flow_status_changed, FlowId, NewStatus}, _From,
+            #state{flows = Flows} = State) ->
+    logger:info("Flow ~w status changed to ~w", [FlowId, NewStatus]),
+    Flow = maps:get(FlowId, Flows),
+    {reply, ok, State#state{
+        flows = maps:put(FlowId, Flow#{status := NewStatus}, Flows)}};
+handle_call(Request, From, State) ->
+    logger:warning("Unexpected call from ~w: ~p", [From, Request]),
+    {reply, {error, unexpected_call}, State}.
+
+
+handle_cast(Request, State) ->
+    logger:warning("Unexpected cast: ~p", [Request]),
+    {noreply, State}.
+
+handle_continue(_Continue, State) ->
+    {noreply, State}.
+
+handle_info({flow_updated, FlowId, NewRoute, From},
+            #state{flows = Flows} = State) ->
+    logger:info("Flow ~w updated to ~w", [FlowId, route_to_labels(NewRoute)]),
+    case maps:find(FlowId, Flows) of
+        error ->
+            % Reply anyway so the caller of update_flow/2 is not left hanging.
+            gen_server:reply(From, {error, flow_not_found}),
+            {noreply, State};
+        {ok, Flow} ->
+            Flows2 = Flows#{FlowId => Flow#{route => NewRoute}},
+            gen_server:reply(From, ok),
+            {noreply, State#state{flows = Flows2}}
+    end;
+handle_info({flow_update_error, FlowId, Reason, From}, State) ->
+    logger:error("Flow ~w updated error: ~p", [FlowId, Reason]),
+    gen_server:reply(From, {error, Reason}),
+    {noreply, State};
+handle_info({flow_initiated, #{id := FlowId, route := Route} = Flow, From},
+            #state{flows = Flows} = State) ->
+    logger:info("Flow ~w initiated to ~p",
+                [FlowId, route_to_labels(Route)]),
+    gen_server:reply(From, {ok, FlowId}),
+    {noreply, State#state{flows = Flows#{FlowId => Flow}}};
+handle_info({flow_init_error, Reason, From}, State) ->
+    logger:error("Flow initialisation error: ~p", [Reason]),
+    gen_server:reply(From, {error, Reason}),
+    {noreply, State};
+handle_info({'DOWN', MonRef, process, Pid, _Reason},
+            #state{sessions = SessMap, sess_pids = PidMap} = State) ->
+    case maps:take(Pid, PidMap) of
+        {#sess{id = Id, monref = MonRef}, PidMap2} ->
+            SessMap2 = maps:remove(Id, SessMap),
+            %TODO: Do something about the flows from this session ?
+            {noreply, State#state{
+                sessions = SessMap2,
+                sess_pids = PidMap2
+            }};
+        _X ->
+            {noreply, State}
+    end;
+handle_info(Info, State) ->
+    logger:warning("Unexpected message: ~p", [Info]),
+    {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+
+%%% INTERNAL FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ted_index(Id) when is_binary(Id) -> id;
+ted_index({_, _, _, _}) -> pcc_address.
+
+pcc_address(Key) ->
+    case epce_ted:lookup(ted_index(Key), Key) of
+        {error, Reason} ->
+            logger:warning("Failed to find a PCC address for router ~p: ~p",
+                           [Key, Reason]),
+            {error, router_not_found};
+        {ok, #{pcc_address := Addr}} ->
+            {ok, Addr}
+    end.
+
+compute_path(From, To) when is_binary(From), is_binary(To) ->
+    compute_path_result(From, To, epce_ted:compute_path(id, From, To));
+compute_path({_, _, _, _} = From, {_, _, _, _} = To) ->
+    compute_path_result(From, To, epce_ted:compute_path(pcc_address, From, To)).
+
+compute_path_result(From, To, {error, Reason}) ->
+    logger:warning("Failed to find a route from ~p to ~p: ~p",
+                   [From, To, Reason]),
+    {error, route_not_found};
+compute_path_result(From, To, {ok, Devices}) ->
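+    % Drop the first label: presumably the ingress node's own label, which is
+    % not needed in the label stack pushed onto outgoing packets.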
+    Labels = tl([L || #{mpls_label := L} <- Devices, L =/= undefined]),
+    logger:debug("Route from ~p to ~p: ~p", [From, To, Labels]),
+    {ok, Labels}.
+
+routereq_from_labels(Source, Destination, Constraints, Labels) ->
+    #{
+        source => Source,
+        destination => Destination,
+        constraints => Constraints,
+        steps => [
+            #{
+                is_loose => false,
+                nai_type => absent,
+                sid => #mpls_stack_entry{label = L}
+            }
+          || L <- Labels
+        ]
+    }.
+
+routeinit_from_labels(Name, Source, Destination, Constraints, Binding, Labels) ->
+    Route = routereq_from_labels(Source, Destination, Constraints, Labels),
+    Route#{
+        name => Name,
+        binding_label => Binding
+    }.
+
+route_to_labels(#{steps := Steps}) ->
+    [Sid#mpls_stack_entry.label || #{sid := Sid} <- Steps].
+
+
+%-- Session Interface Functions ------------------------------------------------
+
+session_update_flow(#state{bouncer = Pid}, SessPid, FlowId, Route, Args) ->
+    Pid ! {update_flow, SessPid, FlowId, Route, Args}.
+
+session_initiate_flow(#state{bouncer = Pid}, SessPid, Route, Args) ->
+    Pid ! {initiate_flow, SessPid, Route, Args}.
+
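+% The "bouncer" is a linked helper process that performs the blocking
+% pcep_server_session calls on behalf of the gen_server, keeping the server
+% loop responsive. Results come back as flow_updated/flow_update_error and
+% flow_initiated/flow_init_error messages handled in handle_info/2, where the
+% original caller is finally answered through gen_server:reply/2.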
+bouncer_start(#state{bouncer = undefined} = State) ->
+    Self = self(),
+    Pid = erlang:spawn_link(fun() ->
+        bouncer_bootstrap(Self)
+    end),
+    receive bouncer_ready -> ok end,
+    State#state{bouncer = Pid}.
+
+bouncer_bootstrap(Parent) ->
+    Parent ! bouncer_ready,
+    bouncer_loop(Parent).
+
+bouncer_loop(Parent) ->
+    receive
+        {update_flow, SessPid, FlowId, ReqRoute, Args} ->
+            case pcep_server_session:update_flow(SessPid, FlowId, ReqRoute) of
+                {ok, NewRoute} ->
+                    Parent ! {flow_updated, FlowId, NewRoute, Args},
+                    bouncer_loop(Parent);
+                {error, Reason} ->
+                    Parent ! {flow_update_error, FlowId, Reason, Args},
+                    bouncer_loop(Parent)
+            end;
+        {initiate_flow, SessPid, InitRoute, Args} ->
+            case pcep_server_session:initiate_flow(SessPid, InitRoute) of
+                {ok, Flow} ->
+                    Parent ! {flow_initiated, Flow, Args},
+                    bouncer_loop(Parent);
+                {error, Reason} ->
+                    Parent ! {flow_init_error, Reason, Args},
+                    bouncer_loop(Parent)
+            end
+    end.
diff --git a/src/te/apps/epce/src/epce_sup.erl b/src/te/apps/epce/src/epce_sup.erl
new file mode 100644
index 0000000000000000000000000000000000000000..79c17c9a869a5bdba078e225a7187c0ebcdbcd92
--- /dev/null
+++ b/src/te/apps/epce/src/epce_sup.erl
@@ -0,0 +1,59 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(epce_sup).
+
+-behaviour(supervisor).
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Behaviour supervisor functions
+-export([start_link/0]).
+-export([init/1]).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(TED_WORKER, epce_ted).
+-define(PCE_WORKER, epce_server).
+
+
+%%% BEHAVIOUR SUPERVISOR FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
+
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+    SupFlags = #{
+        strategy => one_for_all,
+        intensity => 0,
+        period => 1
+    },
+    TEDSpec = #{
+        id => ?TED_WORKER,
+        start => {?TED_WORKER, start_link, []},
+        restart => permanent,
+        shutdown => brutal_kill
+    },
+    ServerSpec = #{
+        id => ?PCE_WORKER,
+        start => {?PCE_WORKER, start_link, []},
+        restart => permanent,
+        shutdown => brutal_kill
+    },
+    {ok, {SupFlags, [TEDSpec, ServerSpec]}}.
+
diff --git a/src/te/apps/epce/src/epce_ted.erl b/src/te/apps/epce/src/epce_ted.erl
new file mode 100644
index 0000000000000000000000000000000000000000..8313bec1ca6b3b75e374dfef7b8fd23d29690d68
--- /dev/null
+++ b/src/te/apps/epce/src/epce_ted.erl
@@ -0,0 +1,239 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(epce_ted).
+
+-behaviour(gen_server).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API Functions
+-export([start_link/0]).
+-export([device_added/2]).
+-export([device_updated/2]).
+-export([device_deleted/1]).
+-export([link_added/2]).
+-export([link_updated/2]).
+-export([link_deleted/1]).
+-export([compute_path/3]).
+-export([lookup/2]).
+
+-export([get_graph/0]).
+
+% Behaviour gen_server functions
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_continue/2]).
+-export([handle_info/2]).
+-export([code_change/3]).
+-export([terminate/2]).
+
+
+%%% RECORDS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(state, {
+    graph :: digraph:graph(),           % the TED: devices as vertices, links as edges
+    pcc_address_to_id = #{} :: map()    % secondary index from PCC address to device id
+}).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+device_added(Id, Device) ->
+    gen_server:call(?MODULE, {device_added, Id, Device}).
+
+device_updated(Id, Device) ->
+    gen_server:call(?MODULE, {device_updated, Id, Device}).
+
+device_deleted(Id) ->
+    gen_server:call(?MODULE, {device_deleted, Id}).
+
+link_added(Id, Link) ->
+    gen_server:call(?MODULE, {link_added, Id, Link}).
+
+link_updated(Id, Link) ->
+    gen_server:call(?MODULE, {link_updated, Id, Link}).
+
+link_deleted(Id) ->
+    gen_server:call(?MODULE, {link_deleted, Id}).
+
+compute_path(Index, From, To)
+  when Index =:= id; Index =:= pcc_address ->
+    gen_server:call(?MODULE, {compute_path, Index, From, To});
+compute_path(Index, _From, _To) ->
+    {error, {invalid_index, Index}}.
+
+lookup(Index, Key)
+  when Index =:= id; Index =:= pcc_address ->
+    gen_server:call(?MODULE, {lookup, Index, Key});
+lookup(Index, _Key) ->
+    {error, {invalid_index, Index}}.
+
+
+get_graph() ->
+    gen_server:call(?MODULE, get_graph).
+
+
+%%% BEHAVIOUR gen_server FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    ?LOG_INFO("Starting TED process...", []),
+    {ok, #state{graph = digraph:new([protected, cyclic])}}.
+
+handle_call({device_added, Id, Device}, _From, State) ->
+    ?LOG_DEBUG("Adding TED device ~p: ~p", [Id, Device]),
+    {reply, ok, do_update_device(State, Id, Device)};
+handle_call({device_updated, Id, Device}, _From, State) ->
+    ?LOG_DEBUG("Updating TED device ~p: ~p", [Id, Device]),
+    {reply, ok, do_update_device(State, Id, Device)};
+handle_call({device_deleted, Id}, _From, State) ->
+    ?LOG_DEBUG("Deleting TED device ~p", [Id]),
+    {reply, ok, do_delete_device(State, Id)};
+handle_call({link_added, Id, Link}, _From, State) ->
+    ?LOG_DEBUG("Adding TED link ~p: ~p", [Id, Link]),
+    {reply, ok, do_update_link(State, Id, Link)};
+handle_call({link_updated, Id, Link}, _From, State) ->
+    ?LOG_DEBUG("Updating TED link ~p: ~p", [Id, Link]),
+    {reply, ok, do_update_link(State, Id, Link)};
+handle_call({link_deleted, Id}, _From, State) ->
+    ?LOG_DEBUG("Deleting TED link ~p", [Id]),
+    {reply, ok, do_delete_link(State, Id)};
+handle_call({compute_path, Index, From, To}, _From, #state{graph = G} = State) ->
+    case as_ids(State, Index, [From, To]) of
+        {ok, [FromId, ToId]} ->
+            {reply, do_compute_path(G, FromId, ToId), State};
+        {error, Reason} ->
+            {reply, {error, Reason}, State}
+    end;
+handle_call({lookup, Index, Key}, _From, #state{graph = G} = State) ->
+    case as_ids(State, Index, [Key]) of
+        {ok, [Id]} ->
+            {reply, do_lookup(G, Id), State};
+        {error, Reason} ->
+            {reply, {error, Reason}, State}
+    end;
+handle_call(get_graph, _From, #state{graph = G} = State) ->
+    {reply, G, State};
+handle_call(Request, _From, State) ->
+    logger:warning("Unexpected call to TED process ~w", [Request]),
+    {reply, {error, unexpected_call}, State}.
+
+handle_cast(Request, State) ->
+    logger:warning("Unexpected cast to TED process ~w", [Request]),
+    {noreply, State}.
+
+handle_continue(_Continue, State) ->
+    {noreply, State}.
+
+handle_info(Info, State) ->
+    logger:warning("Unexpected message to TED process ~w", [Info]),
+    {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+terminate(_Reason, _State) ->
+    ?LOG_INFO("Terminating TED process...", []),
+    ok.
+
+
+%%% INTERNAL FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+as_ids(_State, id, Keys) ->
+    {ok, Keys};
+as_ids(State, IndexType, Keys) ->
+    as_ids(State, IndexType, Keys, []).
+
+as_ids(_State, _IndexType, [], Acc) ->
+    {ok, lists:reverse(Acc)};
+as_ids(#state{pcc_address_to_id = Index} = State, pcc_address, [Key | Rest], Acc) ->
+    case maps:find(Key, Index) of
+        error -> {error, {unknown_key, Key}};
+        {ok, Id} -> as_ids(State, pcc_address, Rest, [Id | Acc])
+    end.
+
+do_update_device(#state{graph = G} = State, Id, NewDevice) ->
+    State2 = case digraph:vertex(G, Id) of
+        false -> State;
+        {Id, OldDevice} -> index_remove_device(State, OldDevice)
+    end,
+    digraph:add_vertex(G, Id, NewDevice),
+    index_add_device(State2, NewDevice).
+
+do_delete_device(#state{graph = G} = State, Id) ->
+    case digraph:vertex(G, Id) of
+        false -> State;
+        {Id, OldDevice} ->
+            digraph:del_vertex(G, Id),
+            index_remove_device(State, OldDevice)
+    end.
+
+index_remove_device(#state{pcc_address_to_id = Index} = State,
+                    #{pcc_address := OldAddress}) ->
+    Index2 = maps:remove(OldAddress, Index),
+    State#state{pcc_address_to_id = Index2}.
+
+index_add_device(State, #{pcc_address := undefined}) ->
+    State;
+index_add_device(#state{pcc_address_to_id = Index} = State,
+                 #{id := Id, pcc_address := NewAddress}) ->
+    Index2 = Index#{NewAddress => Id},
+    State#state{pcc_address_to_id = Index2}.
+
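+% Links are bidirectional, so each link is stored as two directed edges
+% keyed {Id, a} and {Id, b}.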
+do_update_link(#state{graph = G} = State, Id, Link) ->
+    #{endpoints := [EP1, EP2]} = Link,
+    #{device := D1} = EP1,
+    #{device := D2} = EP2,
+    digraph:add_edge(G, {Id, a}, D1, D2, Link),
+    digraph:add_edge(G, {Id, b}, D2, D1, Link),
+    State.
+
+do_delete_link(#state{graph = G} = State, Id) ->
+    digraph:del_edge(G, {Id, a}),
+    digraph:del_edge(G, {Id, b}),
+    State.
+
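+% Note: digraph:get_short_path/3 computes an unweighted shortest path; link
+% metrics and route-request constraints are not taken into account here.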
+do_compute_path(G, FromId, ToId) ->
+    case digraph:get_short_path(G, FromId, ToId) of
+        false -> {error, not_found};
+        Ids ->
+            % Propagate errors from retrieve_devices/3 instead of wrapping them in {ok, _}.
+            case retrieve_devices(G, Ids, []) of
+                {error, _Reason} = Error -> Error;
+                Devices -> {ok, Devices}
+            end
+    end.
+
+do_lookup(G, Id) ->
+    case digraph:vertex(G, Id) of
+        {_, Info} -> {ok, Info};
+        false -> {error, not_found}
+    end.
+
+retrieve_devices(_G, [], Acc) ->
+    lists:reverse(Acc);
+retrieve_devices(G, [Id | Rest], Acc) ->
+    case digraph:vertex(G, Id) of
+        false -> {error, invalid_path};
+        {Id, Device} ->
+            retrieve_devices(G, Rest, [Device | Acc])
+    end.
diff --git a/src/te/apps/tfte/src/tfte.app.src b/src/te/apps/tfte/src/tfte.app.src
new file mode 100644
index 0000000000000000000000000000000000000000..abebf116975d5a820d2da462c319f67fdbf93118
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte.app.src
@@ -0,0 +1,32 @@
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
+{application, tfte,
+ [{description, "Teraflow Traffic Engineering Service"},
+  {vsn, "0.1.0"},
+  {registered, []},
+  {mod, {tfte_app, []}},
+  {applications,
+   [kernel,
+    stdlib,
+    tfpb,
+    jsx,
+    epce
+   ]},
+  {env,[]},
+  {modules, []},
+
+  {licenses, ["Apache 2.0"]},
+  {links, []}
+ ]}.
diff --git a/src/te/apps/tfte/src/tfte_app.erl b/src/te/apps/tfte/src/tfte_app.erl
new file mode 100644
index 0000000000000000000000000000000000000000..a629a1b139d4d2a2965ff724676f23b6734ddd11
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_app.erl
@@ -0,0 +1,110 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% @doc tfte public API
+%% @end
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_app).
+
+-behaviour(application).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Behaviour application callback functions
+-export([start/2, stop/1]).
+
+
+%%% BEHAVIOUR application CALLBACK FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start(_StartType, _StartArgs) ->
+    case tfte_sup:start_link() of
+        {ok, Pid} ->
+            add_services(),
+            {ok, Pid};
+        Other ->
+            Other
+    end.
+
+stop(_State) ->
+    ok.
+
+
+%%% INTERNAL FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+add_services() ->
+    case application:get_env(tfte, services) of
+        {ok, Services} -> add_services(Services);
+        _ -> ok
+    end.
+
+add_services([]) -> ok;
+add_services([{Name, EndpointsSpecs, GrpcOpts} | Rest]) ->
+    try resolve_endpoints(Name, EndpointsSpecs, []) of
+        Endpoints ->
+            case grpcbox_channel_sup:start_child(Name, Endpoints, GrpcOpts) of
+                {ok, _Pid} ->
+                    ?LOG_INFO("GRPC channel to ~s service started", [Name]),
+                    ok;
+                {error, Reason} ->
+                    ?LOG_WARNING("GRPC channel to ~s service failed to start: ~p",
+                                 [Name, Reason]),
+                    ok
+            end
+    catch
+        throw:{Name, Reason, Extra} ->
+            ?LOG_WARNING("Failed to resolve ~s service configuration: ~s ~p",
+                         [Name, Reason, Extra])
+    end,
+    add_services(Rest).
+
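+% The host and port of an endpoint spec may be given literally or as
+% {env, "VAR"}, in which case they are resolved from the environment at startup.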
+resolve_endpoints(_Name, [], Acc) ->
+    lists:reverse(Acc);
+resolve_endpoints(Name, [{Transport, HostSpec, PortSpec, SslOpts} | Rest], Acc) ->
+    Acc2 = [{Transport, resolve_host_spec(Name, HostSpec),
+             resolve_port_spec(Name, PortSpec), SslOpts} | Acc],
+    resolve_endpoints(Name, Rest, Acc2).
+
+resolve_host_spec(_Name, Hostname) when is_list(Hostname) -> Hostname;
+resolve_host_spec(Name, {env, Key}) when is_list(Key) ->
+    try os:getenv(Key) of
+        false -> throw({Name, service_hostname_not_found, Key});
+        Hostname -> Hostname
+    catch
+        _:Reason ->
+            throw({Name, service_hostname_error, Reason})
+    end.
+
+resolve_port_spec(_Name, Port) when is_integer(Port) -> Port;
+resolve_port_spec(Name, {env, Key}) when is_list(Key) ->
+    try os:getenv(Key) of
+        false -> throw({Name, service_port_not_found, Key});
+        PortStr ->
+            try list_to_integer(PortStr) of
+                Port -> Port
+            catch
+                _:Reason ->
+                    throw({Name, service_port_error, Reason})
+            end
+    catch
+        _:Reason ->
+            throw({Name, service_port_error, Reason})
+    end.
diff --git a/src/te/apps/tfte/src/tfte_context.erl b/src/te/apps/tfte/src/tfte_context.erl
new file mode 100644
index 0000000000000000000000000000000000000000..453852f34dd5210b8c7d407d967a3da36f635349
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_context.erl
@@ -0,0 +1,187 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_context).
+
+-behaviour(gen_statem).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API functions
+-export([start_link/0]).
+-export([is_ready/0]).
+
+% Behaviour gen_statem functions
+-export([init/1]).
+-export([callback_mode/0]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+
+%%% Records %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(data, {
+    uuid :: map(),
+    sub :: term() | undefined,
+    obj :: map() | undefined
+}).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(SUBSCRIBE_RETRY_TIMEOUT, 1000).
+-define(RETRIEVE_RETRY_TIMEOUT, 10000).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+is_ready() ->
+    case whereis(?MODULE) of
+        undefined -> false;
+        _ -> gen_statem:call(?MODULE, is_ready)
+    end.
+
+
+%%% BEHAVIOUR gen_statem FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    {ok, Name} = application:get_env(tfte, context),
+    ?LOG_INFO("Starting context ~s service handler...", [Name]),
+    UUID = #{context_uuid => #{uuid => Name}},
+    {ok, subscribe, #data{uuid = UUID}}.
+
+callback_mode() -> [handle_event_function, state_enter].
+
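+% Lifecycle: subscribe (open a context event stream) -> retrieve (fetch the
+% context object, retrying until it is available) -> ready (watch events).
+% Losing the subscription drops the state machine back to subscribe.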
+%-- SUBSCRIBE STATE ------------------------------------------------------------
+handle_event(enter, _, subscribe, #data{sub = undefined}) ->
+    {keep_state_and_data, [{state_timeout, 0, do_subscribe}]};
+handle_event(enter, _, subscribe, Data) ->
+    % We already have a context subscription
+    {next_state, ready, Data};
+handle_event(state_timeout, do_subscribe, subscribe, Data) ->
+    ?LOG_DEBUG("Subscribing to context events...", []),
+    case do_subscribe() of
+        {ok, Sub} ->
+            ?LOG_INFO("Subscribed to context events", []),
+            Data2 = Data#data{sub = Sub},
+            {next_state, retrieve, Data2};
+        {error, Reason} ->
+            ?LOG_ERROR("Failed to subscribe to context service events: ~p", [Reason]),
+            {keep_state_and_data, [{state_timeout, ?SUBSCRIBE_RETRY_TIMEOUT, do_subscribe}]}
+    end;
+%-- RETRIEVE STATE -------------------------------------------------------------
+handle_event(enter, _, retrieve, _Data) ->
+    {keep_state_and_data, [{state_timeout, 0, do_retrieve}]};
+handle_event(state_timeout, do_retrieve, retrieve, #data{uuid = UUID} = Data) ->
+    ?LOG_DEBUG("Retrieving context ~p...", [UUID]),
+    case get_object(UUID) of
+        error ->
+            {keep_state_and_data, [{state_timeout, ?RETRIEVE_RETRY_TIMEOUT, do_retrieve}]};
+        {ok, Context} ->
+            ?LOG_DEBUG("Got context: ~p", [Context]),
+            tfte_server:context_ready(Context),
+            {next_state, ready, Data#data{obj = Context}}
+    end;
+handle_event(info, {headers, Id, Value}, retrieve,
+             #data{sub = #{stream_id := Id}}) ->
+    %TODO: Handle HTTP errors ???
+    ?LOG_DEBUG("Received context stream header: ~p", [Value]),
+    keep_state_and_data;
+handle_event(info, {data, Id, Value}, retrieve,
+             #data{sub = #{stream_id := Id}}) ->
+    ?LOG_DEBUG("Received context event, retrying context: ~p", [Value]),
+    {keep_state_and_data, [{state_timeout, 0, do_retrieve}]};
+handle_event(info, {'DOWN', Ref, process, Pid, Reason}, retrieve,
+             #data{sub = #{stream_id := Id, monitor_ref := Ref, stream_pid := Pid}} = Data) ->
+    ?LOG_DEBUG("Context subscription is down: ~p", [Reason]),
+    Data2 = Data#data{sub = undefined},
+    Info = receive
+        {trailers, Id, {Status, Message, Metadata}} ->
+            {Reason, Status, Message, Metadata}
+    after 0 ->
+        Reason
+    end,
+    ?LOG_ERROR("Context subscription error: ~p", [Info]),
+    {next_state, subscribe, Data2};
+%-- READY STATE ----------------------------------------------------------------
+handle_event(enter, _, ready, _Data) ->
+    keep_state_and_data;
+handle_event(info, {headers, Id, Value}, ready,
+             #data{sub = #{stream_id := Id}}) ->
+    %TODO: Handle HTTP errors ???
+    ?LOG_DEBUG("Received context stream header: ~p", [Value]),
+    keep_state_and_data;
+handle_event(info, {data, Id, #{context_id := UUID, event := Event}}, ready,
+             #data{uuid = UUID, sub = #{stream_id := Id}}) ->
+    ?LOG_DEBUG("Received context event: ~p", [Event]),
+    tfte_server:context_event(Event),
+    keep_state_and_data;
+handle_event(info, {'DOWN', Ref, process, Pid, Reason}, ready,
+             #data{sub = #{stream_id := Id, monitor_ref := Ref, stream_pid := Pid}} = Data) ->
+    ?LOG_DEBUG("Context subscription is down: ~p", [Reason]),
+    Data2 = Data#data{sub = undefined},
+    Info = receive
+        {trailers, Id, {Status, Message, Metadata}} ->
+            {Reason, Status, Message, Metadata}
+    after 0 ->
+        Reason
+    end,
+    ?LOG_ERROR("Context subscription error: ~p", [Info]),
+    {next_state, subscribe, Data2};
+%-- ANY STATE ------------------------------------------------------------------
+handle_event({call, From}, is_ready, State, _Data) ->
+    % A gen_statem reply action must carry the caller: {reply, From, Reply}.
+    {keep_state_and_data, [{reply, From, State =:= ready}]};
+handle_event(info, Msg, StateName, _Data) ->
+    ?LOG_WARNING("Unexpected context message in state ~w: ~p", [StateName, Msg]),
+    keep_state_and_data.
+
+terminate(Reason, _State, _Data) ->
+    ?LOG_INFO("Context service handler terminated: ~p", [Reason]),
+    ok.
+
+code_change(_OldVsn, OldState, OldData, _Extra) ->
+    {ok, OldState, OldData}.
+
+
+%%% INTERNAL FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+grpc_opts() ->
+    #{channel => context}.
+
+do_subscribe() ->
+    context_context_service_client:get_context_events(#{}, grpc_opts()).
+
+get_object(UUID) ->
+    case context_context_service_client:get_context(UUID, grpc_opts()) of
+        {error, Reason} -> 
+            ?LOG_ERROR("Local error while retrieving the context object: ~p", [Reason]),
+            error;
+        {error, Reason, _Headers} ->
+            ?LOG_ERROR("Remote error while retrieving the context object: ~p", [Reason]),
+            error;
+        {ok, Result, _Headers} ->
+            {ok, Result}
+    end.
\ No newline at end of file
diff --git a/src/te/apps/tfte/src/tfte_server.erl b/src/te/apps/tfte/src/tfte_server.erl
new file mode 100644
index 0000000000000000000000000000000000000000..002bda810e901debddd72a3cfa7ec50bab8e3f97
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_server.erl
@@ -0,0 +1,189 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_server).
+
+-behaviour(gen_statem).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API functions
+-export([start_link/0]).
+-export([context_ready/1]).
+-export([context_event/1]).
+-export([topology_ready/1]).
+-export([topology_event/1]).
+-export([request_lsp/1]).
+-export([delete_lsp/1]).
+
+% Behaviour gen_statem functions
+-export([init/1]).
+-export([callback_mode/0]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+
+%%% Records %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(data, {
+    services = #{}
+}).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+context_ready(Context) ->
+    gen_statem:cast(?MODULE, {context_ready, Context}).
+
+context_event(Event) ->
+    gen_statem:cast(?MODULE, {context_event, Event}).
+
+topology_ready(Topology) ->
+    gen_statem:cast(?MODULE, {topology_ready, Topology}).
+
+topology_event(Event) ->
+    gen_statem:cast(?MODULE, {topology_event, Event}).
+
+request_lsp(ServiceMap) ->
+    gen_statem:call(?MODULE, {request_lsp, ServiceMap}).
+
+delete_lsp(ServiceId) ->
+    gen_statem:call(?MODULE, {delete_lsp, ServiceId}).
+
+
+%%% BEHAVIOUR gen_statem FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    ?LOG_INFO("Starting server...", []),
+    case tfte_context:is_ready() of
+        false -> {ok, wait_context, #data{}};
+        true -> {ok, ready, #data{}}
+    end.
+
+callback_mode() -> [handle_event_function, state_enter].
+
+%-- WAIT_CONTEXT STATE ---------------------------------------------------------
+handle_event(enter, _, wait_context, _Data) ->
+    keep_state_and_data;
+handle_event(cast, {context_ready, _Context}, wait_context, Data) ->
+    ?LOG_DEBUG("Teraflow context initialized: ~p", [_Context]),
+    tfte_topology:context_updated(),
+    {next_state, ready, Data};
+%-- READY STATE ----------------------------------------------------------------
+handle_event(enter, _, ready, _Data) ->
+    keep_state_and_data;
+handle_event(cast, {context_ready, _Context}, ready, _Data) ->
+    ?LOG_DEBUG("Teraflow context updated: ~p", [_Context]),
+    tfte_topology:context_updated(),
+    keep_state_and_data;
+handle_event(cast, {context_event, _Event}, ready, _Data) ->
+    ?LOG_DEBUG("Teraflow context event: ~p", [_Event]),
+    keep_state_and_data;
+handle_event(cast, {topology_ready, _Topology}, ready, _Data) ->
+    ?LOG_DEBUG("Teraflow topology updated: ~p", [_Topology]),
+    keep_state_and_data;
+handle_event(cast, {topology_event, _Event}, ready, _Data) ->
+    ?LOG_DEBUG("Teraflow topology event: ~p", [_Event]),
+    keep_state_and_data;
+handle_event({call, From}, {request_lsp, ServiceMap}, ready, Data) ->
+    ?LOG_DEBUG("Teraflow service ~s requested its LSPs",
+               [format_service_id(maps:get(service_id, ServiceMap, undefined))]),
+    {Result, Data2} = do_request_lsp(Data, ServiceMap),
+    {keep_state, Data2, [{reply, From, Result}]};
+handle_event({call, From}, {delete_lsp, ServiceId}, ready, Data) ->
+    ?LOG_DEBUG("Teraflow service ~s delete its LSPs",
+              [format_service_id(ServiceId)]),
+    {Result, Data2} = do_delete_lsp(Data, ServiceId),
+    {keep_state, Data2, [{reply, From, Result}]};
+%-- ANY STATE ------------------------------------------------------------------
+handle_event(EventType, EventContent, State, _Data) ->
+    ?LOG_WARNING("Unexpected tfte_server ~w event in state ~w: ~w",
+                 [EventType, State, EventContent]),
+    keep_state_and_data.
+
+terminate(Reason, _State, _Data) ->
+    ?LOG_INFO("Server terminated: ~p", [Reason]),
+    ok.
+
+code_change(_OldVsn, OldState, OldData, _Extra) ->
+    {ok, OldState, OldData}.
+
+
+%%% INTERNAL FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+format_service_id(undefined) -> <<"undefined">>;
+format_service_id(#{context_id := #{context_uuid := #{uuid := ContextName}},
+                    service_uuid := #{uuid := ServiceUUID}}) ->
+    iolist_to_binary(io_lib:format("~s:~s", [ContextName, ServiceUUID])).
+
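+% A TE service is expected to carry two custom config rules, /lsp-fw and
+% /lsp-bw, describing the forward and backward LSPs. Both are initiated through
+% epce_server; the service is reported active only if the two flows succeed.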
+do_request_lsp(#data{services = Services} = Data,
+               #{service_type := 'SERVICETYPE_TE'} = ServiceMap) ->
+    try
+
+    #{service_config := Config,
+      service_endpoint_ids := Endpoints,
+      service_id := ServiceId} = ServiceMap,
+    #{<<"binding_label">> := BindingLabel1, <<"symbolic_name">> := SymbolicName1}
+        = tfte_util:custom_config(Config, <<"/lsp-fw">>),
+    #{<<"binding_label">> := BindingLabel2, <<"symbolic_name">> := SymbolicName2}
+        = tfte_util:custom_config(Config, <<"/lsp-bw">>),
+    [#{device_id := #{device_uuid := #{uuid := Id1}}},
+     #{device_id := #{device_uuid := #{uuid := Id2}}}] = Endpoints,
+    case epce_server:initiate_flow(SymbolicName1, Id1, Id2, BindingLabel1) of
+        {error, Reason} ->
+            ?LOG_ERROR("Error while setting up service ~s forward LSP: ~p",
+                       [format_service_id(ServiceId), Reason]),
+            {{error, Reason}, Data};
+        {ok, ForwardFlow} ->
+            case epce_server:initiate_flow(SymbolicName2, Id2, Id1, BindingLabel2) of
+                {error, Reason} ->
+                    ?LOG_ERROR("Error while setting up service ~s backward LSP: ~p",
+                               [format_service_id(ServiceId), Reason]),
+                    %TODO: Cleanup forward flow ?
+                    {{error, Reason}, Data};
+                {ok, BackwardFlow} ->
+                    ServiceData = {ServiceMap, ForwardFlow, BackwardFlow},
+                    Services2 = Services#{ServiceId => ServiceData},
+                    Data2 = Data#data{services = Services2},
+                    {{ok, 'SERVICESTATUS_ACTIVE'}, Data2}
+            end
+    end
+
+    catch T:E:S ->
+        ?LOG_ERROR("Error while requesintg LSP: ~p:~p", [T, E]),
+        ?LOG_ERROR("Stacktrace: ~p", [S]),
+        {{error, internal_error}, Data}
+    end;
+do_request_lsp(Data, ServiceMap) ->
+    ?LOG_ERROR("Invalid arguments to RequestLSP call: ~p", [ServiceMap]),
+    {{error, badarg}, Data}.
+
+do_delete_lsp(Data, ServiceId) ->
+    ?LOG_INFO("LSP DELETION REQUESTED ~p", [ServiceId]),
+    {{error, not_implemented}, Data}.
\ No newline at end of file
diff --git a/src/te/apps/tfte/src/tfte_service_sup.erl b/src/te/apps/tfte/src/tfte_service_sup.erl
new file mode 100644
index 0000000000000000000000000000000000000000..6ec5d09b069ca856f6298fdc8effa2bb01cf115a
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_service_sup.erl
@@ -0,0 +1,64 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% @doc tfte service supervisor.
+%% @end
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_service_sup).
+
+-behaviour(supervisor).
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API Functions
+-export([start_link/0]).
+
+% Behaviour supervisor callback functions
+-export([init/1]).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(SERVER, ?MODULE).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+
+%%% BEHAVIOUR supervisor CALLBACK FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    SupFlags = #{strategy => one_for_one,
+                 intensity => 0,
+                 period => 1},
+    ContextSpec = #{
+        id => tfte_context,
+        start => {tfte_context, start_link, []},
+        restart => permanent,
+        shutdown => brutal_kill
+    },
+    TopologySpec = #{
+        id => tfte_topology,
+        start => {tfte_topology, start_link, []},
+        restart => permanent,
+        shutdown => brutal_kill
+    },
+    ChildSpecs = [ContextSpec, TopologySpec],
+    {ok, {SupFlags, ChildSpecs}}.
diff --git a/src/te/apps/tfte/src/tfte_sup.erl b/src/te/apps/tfte/src/tfte_sup.erl
new file mode 100644
index 0000000000000000000000000000000000000000..4630511d6fec909b002acd57bec61790e54f7e94
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_sup.erl
@@ -0,0 +1,66 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% @doc tfte top level supervisor.
+%% @end
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_sup).
+
+-behaviour(supervisor).
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API Functions
+-export([start_link/0]).
+
+% Behaviour supervisor callback functions
+-export([init/1]).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(SERVER, ?MODULE).
+-define(ROOT_SERVER, tfte_server).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+
+%%% BEHAVIOUR supervisor CALLBACK FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
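+% Restart intensity 0: a single child crash terminates the whole application;
+% recovery is presumably left to the platform restarting the container.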
+init([]) ->
+    SupFlags = #{strategy => one_for_all,
+                 intensity => 0,
+                 period => 1},
+    ServiceSupSpec = #{
+        id => service_sup,
+        start => {tfte_service_sup, start_link, []},
+        restart => permanent,
+        type => supervisor,
+        shutdown => brutal_kill
+    },
+    ServerSpec = #{
+        id => ?ROOT_SERVER,
+        start => {?ROOT_SERVER, start_link, []},
+        restart => permanent,
+        shutdown => brutal_kill
+    },
+    ChildSpecs = [ServerSpec, ServiceSupSpec],
+    {ok, {SupFlags, ChildSpecs}}.
diff --git a/src/te/apps/tfte/src/tfte_te_service.erl b/src/te/apps/tfte/src/tfte_te_service.erl
new file mode 100644
index 0000000000000000000000000000000000000000..b9911ee37a5719c838baca7f86dd326b810f9bc8
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_te_service.erl
@@ -0,0 +1,66 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_te_service).
+
+-behaviour(te_te_service_bhvr).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("grpcbox/include/grpcbox.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Behaviour te_te_service_bhvr callback functions
+-export([request_lsp/2]).
+-export([update_lsp/2]).
+-export([delete_lsp/2]).
+
+
+%%% BEHAVIOUR te_te_service_bhvr CALLBACK FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
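+% request_lsp/2 and delete_lsp/2 report failures in-band as
+% 'SERVICESTATUS_UNDEFINED' rather than as gRPC errors; update_lsp/2 is not
+% implemented yet.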
+request_lsp(Ctx, Service) ->
+    ?LOG_INFO("Requesting LSP: ~p", [Service]),
+    try tfte_server:request_lsp(Service) of
+        {ok, Status} ->
+            {ok, #{service_status => Status}, Ctx};
+        {error, Reason} ->
+            ?LOG_INFO("Error while requesting LSP: ~p", [Reason]),
+            {ok, #{service_status => 'SERVICESTATUS_UNDEFINED'}, Ctx}
+    catch E:R:S ->
+        ?LOG_ERROR("Error while requesting LSP: ~p:~p ~p", [E, R, S]),
+        {ok, #{service_status => 'SERVICESTATUS_UNDEFINED'}, Ctx}
+    end.
+
+update_lsp(_Ctx, _ServiceId) ->
+    {error, {?GRPC_STATUS_UNIMPLEMENTED, <<"Not yet implemented">>},
+             #{headers => #{}, trailers => #{}}}.
+
+delete_lsp(Ctx, ServiceId) ->
+    ?LOG_ERROR("Deleting LSP: ~p", [ServiceId]),
+    try tfte_server:delete_lsp(ServiceId) of
+        {ok, Status} ->
+            {ok, #{service_status => Status}, Ctx};
+        {error, Reason} ->
+            ?LOG_INFO("Error while deleting LSP: ~p", [Reason]),
+            {ok, #{service_status => 'SERVICESTATUS_UNDEFINED'}, Ctx}
+    catch E:R:S ->
+        ?LOG_ERROR("Error while deleting LSP: ~p:~p ~p", [E, R, S]),
+        {ok, #{service_status => 'SERVICESTATUS_UNDEFINED'}, Ctx}
+    end.
diff --git a/src/te/apps/tfte/src/tfte_topology.erl b/src/te/apps/tfte/src/tfte_topology.erl
new file mode 100644
index 0000000000000000000000000000000000000000..39897caa800ebed291d34d91b4a6b154c90f0f2e
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_topology.erl
@@ -0,0 +1,405 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_topology).
+
+-behaviour(gen_statem).
+
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API functions
+-export([start_link/0]).
+-export([context_updated/0]).
+
+% Behaviour gen_statem functions
+-export([init/1]).
+-export([callback_mode/0]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+
+%%% Records %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-record(data, {
+    uuid :: map(),
+    sub :: term() | undefined,
+    obj :: map() | undefined,
+    devices = #{} :: map(),
+    links = #{} :: map(),
+    names = #{} :: map()
+}).
+
+
+%%% MACROS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(SUBSCRIBE_RETRY_TIMEOUT, 1000).
+-define(RETRIEVE_RETRY_TIMEOUT, 10000).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+start_link() ->
+    gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+context_updated() ->
+    gen_statem:cast(?MODULE, context_updated).
+
+
+%%% BEHAVIOUR gen_statem FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init([]) ->
+    {ok, ContextName} = application:get_env(tfte, context),
+    {ok, TopoName} = application:get_env(tfte, topology),
+    ContextUUID = #{context_uuid => #{uuid => ContextName}},
+    TopoUUID = #{context_id => ContextUUID,
+                 topology_uuid => #{uuid => TopoName}},
+    ?LOG_INFO("Starting topology ~s service handler...", [TopoName]),
+    {ok, retrieve, #data{uuid = TopoUUID}}.
+
+callback_mode() -> [handle_event_function, state_enter].
+
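+% Lifecycle: retrieve (fetch the topology, retrying until it has devices and
+% links) -> subscribe (open an event stream and load the TED) -> ready (react
+% to topology events). Subscription loss drops back to retrieve.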
+%-- RETRIEVE STATE -------------------------------------------------------------
+handle_event(enter, _, retrieve, _Data) ->
+    {keep_state_and_data, [{state_timeout, 0, do_retrieve}]};
+handle_event(state_timeout, do_retrieve, retrieve, #data{uuid = UUID} = Data) ->
+    ?LOG_DEBUG("Retrieving topology ~p...", [UUID]),
+    case get_object(UUID) of
+        error ->
+            {keep_state_and_data, [{state_timeout, ?RETRIEVE_RETRY_TIMEOUT, do_retrieve}]};
+        {ok, #{device_ids := Devices, link_ids := Links } = Topology} ->
+            case {length(Devices), length(Links)} of
+                {D, L} when D =:= 0; L =:= 0 ->
+                    ?LOG_WARNING("Got topology, but there is missing devices or links", []),
+                    {keep_state_and_data, [{state_timeout, 1000, do_retrieve}]};
+                _ ->
+                    ?LOG_DEBUG("Got topology: ~p", [Topology]),
+                    {next_state, subscribe, Data#data{obj = Topology}}
+            end
+    end;
+handle_event(cast, context_updated, retrieve, _Data) ->
+    {keep_state_and_data, [{state_timeout, 0, do_retrieve}]};
+%-- SUBSCRIBE STATE ------------------------------------------------------------
+handle_event(enter, _, subscribe, #data{sub = undefined}) ->
+    {keep_state_and_data, [{state_timeout, 0, do_subscribe}]};
+handle_event(enter, _, subscribe, Data) ->
+    % We already have a topology subscription
+    {next_state, ready, Data};
+handle_event(state_timeout, do_subscribe, subscribe, #data{uuid = UUID} = Data) ->
+    ?LOG_DEBUG("Subscribing to topology events...", []),
+    case do_subscribe(UUID) of
+        {ok, Sub} ->
+            ?LOG_INFO("Subscribed to topology events", []),
+            Data2 = #data{obj = Obj} = Data#data{sub = Sub},
+            #{device_ids := DeviceIds, link_ids := LinkIds} = Obj,
+            case update_topology(Data2, DeviceIds, LinkIds) of
+                {ok, Data3} ->
+                    tfte_server:topology_ready(Obj),
+                    {next_state, ready, Data3};
+                {error, Reason} ->
+                    ?LOG_ERROR("Failed to load topology: ~p", [Reason]),
+                    statem_rollback_to_retrieve(Data2)
+            end;
+        {error, Reason} ->
+            ?LOG_ERROR("Failed to subscribe to topology service events: ~p", [Reason]),
+            {next_state, retrieve, Data, [{state_timeout, ?SUBSCRIBE_RETRY_TIMEOUT, do_retrieve}]}
+    end;
+%-- READY STATE ----------------------------------------------------------------
+handle_event(enter, _, ready, _Data) ->
+    keep_state_and_data;
+handle_event(info, {headers, Id, Value}, ready,
+             #data{sub = #{stream_id := Id}}) ->
+    %TODO: Handle HTTP errors ???
+    ?LOG_DEBUG("Received topology stream header: ~p", [Value]),
+    keep_state_and_data;
+handle_event(info, {data, Id, #{event := Event}}, ready,
+             #data{sub = #{stream_id := Id}} = Data) ->
+    ?LOG_DEBUG("Received topology event: ~p", [Event]),
+    handle_topology_event(Data, Event);
+handle_event(info, {'DOWN', Ref, process, Pid, Reason}, ready,
+             #data{sub = #{stream_id := Id, monitor_ref := Ref, stream_pid := Pid}} = Data) ->
+    ?LOG_DEBUG("Topology subscription is down: ~p", [Reason]),
+    Data2 = Data#data{sub = undefined},
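+    % Drain any trailers already queued for this stream (the `after 0` keeps
+    % this non-blocking) so the error report carries the gRPC status when
+    % available.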
+    Info = receive
+        {trailers, Id, {Status, Message, Metadata}} ->
+            {Reason, Status, Message, Metadata}
+    after 0 ->
+        Reason
+    end,
+    ?LOG_ERROR("Topology subscription error: ~p", [Info]),
+    {next_state, retrieve, Data2};
+handle_event(cast, context_updated, ready, _Data) ->
+    keep_state_and_data;
+%-- ANY STATE ------------------------------------------------------------------
+handle_event(EventType, Msg, StateName, _Data) ->
+    ?LOG_WARNING("Unexpected topology ~w event in state ~w: ~p",
+                 [EventType, StateName, Msg]),
+    keep_state_and_data.
+
+terminate(Reason, _State, _Data) ->
+    ?LOG_INFO("Topology service handler terminated: ~p", [Reason]),
+    ok.
+
+code_change(_OldVsn, OldState, OldData, _Extra) ->
+    {ok, OldState, OldData}.
+
+
+%%% INTERNAL FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
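+% Roll the state machine back to the `retrieve` state, closing the event
+% stream first (when one exists) so a stale subscription cannot keep
+% delivering events for an outdated topology.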
+statem_rollback_to_retrieve(#data{sub = undefined} = Data) ->
+    {next_state, retrieve, Data, [{state_timeout, ?RETRIEVE_RETRY_TIMEOUT, do_retrieve}]};
+statem_rollback_to_retrieve(#data{sub = Sub} = Data) ->
+    grpcbox_client:close_send_and_recv(Sub),
+    Data2 = Data#data{sub = undefined},
+    {next_state, retrieve, Data2, [{state_timeout, ?RETRIEVE_RETRY_TIMEOUT, do_retrieve}]}.
+
+handle_topology_event(#data{uuid = UUID} = Data,
+                      #{event_type := 'EVENTTYPE_UPDATE'} = Event) ->
+    case get_object(UUID) of
+        error ->
+            statem_rollback_to_retrieve(Data);
+        {ok, #{device_ids := DeviceIds, link_ids := LinkIds} = Topology} ->
+            ?LOG_DEBUG("Got new topology: ~p", [Topology]),
+            Data2 = Data#data{obj = Topology},
+            case update_topology(Data2, DeviceIds, LinkIds) of
+                {ok, Data3} ->
+                    tfte_server:topology_event(Event),
+                    {keep_state, Data3};
+                {error, Reason} ->
+                    ?LOG_ERROR("Failed to update topology: ~p", [Reason]),
+                    statem_rollback_to_retrieve(Data2)
+            end
+    end;
+handle_topology_event(_Data, Event) ->
+    tfte_server:topology_event(Event),
+    keep_state_and_data.
+
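+% update_devices/3 and update_links/3 throw {device_retrieval_error, Id} or
+% {link_retrieval_error, Id} on the first failed gRPC lookup; the throw is
+% converted here into {error, Reason} so callers can roll back to `retrieve`.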
+update_topology(Data, DeviceIds, LinkIds) ->
+    try
+        {Data2, Events} = update_devices(Data, DeviceIds, []),
+        {Data3, Events2} = update_links(Data2, LinkIds, Events),
+        post_topology_events(lists:reverse(Events2)),
+        {ok, Data3}
+    catch
+        throw:Reason ->
+            {error, Reason}
+    end.
+
+post_topology_events(Events) ->
+    lists:foreach(fun post_topology_event/1, Events).
+
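+% Each event is forwarded to epce_ted, which (per the name) presumably
+% maintains the PCE's traffic-engineering database (TED) of devices and links.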
+post_topology_event({device_added, Id, Device}) ->
+    epce_ted:device_added(Id, Device);
+post_topology_event({device_updated, Id, Device}) ->
+    epce_ted:device_updated(Id, Device);
+post_topology_event({device_deleted, Id}) ->
+    epce_ted:device_deleted(Id);
+post_topology_event({link_added, Id, Link}) ->
+    epce_ted:link_added(Id, Link);
+post_topology_event({link_updated, Id, Link}) ->
+    epce_ted:link_updated(Id, Link);
+post_topology_event({link_deleted, Id}) ->
+    epce_ted:link_deleted(Id).
+
+update_devices(#data{devices = OldDevices} = Data, DeviceIds, Events) ->
+    update_devices(Data, OldDevices, #{}, DeviceIds, Events).
+
+update_devices(Data, OldDevices, NewDevices, [], Events) ->
+    #data{names = Names} = Data,
+    Events2 = [{device_deleted, maps:get(I, Names, undefined)}
+               || I <- maps:keys(OldDevices)] ++ Events,
+    {Data#data{devices = NewDevices}, Events2};
+update_devices(Data, OldDevices, NewDevices, [GivenId | Rest], Events) ->
+    case get_device(GivenId) of
+        error -> throw({device_retrieval_error, GivenId});
+        {ok, Device} ->
+            Device2 = #{id := Id, real_id := RealId} = post_process_device(Device),
+            #data{names = Names} = Data,
+            Data2 = Data#data{names = Names#{RealId => Id}},
+            NewDevices2 = NewDevices#{Id => Device},
+            case maps:take(Id, OldDevices) of
+                error ->
+                    % New device
+                    Events2 = [{device_added, Id, Device2} | Events],
+                    update_devices(Data2, OldDevices, NewDevices2, Rest, Events2);
+                {Device, OldDevices2} ->
+                    % Device did not change
+                    update_devices(Data2, OldDevices2, NewDevices2, Rest, Events);
+                {_OldDevice, OldDevices2} ->
+                    % Device changed
+                    Events2 = [{device_updated, Id, Device2} | Events],
+                    update_devices(Data2, OldDevices2, NewDevices2, Rest, Events2)
+            end
+    end.
+
+update_links(#data{links = OldLinks} = Data, LinkIds, Events) ->
+    update_links(Data, OldLinks, #{}, LinkIds, Events).
+
+update_links(Data, OldLinks, NewLinks, [], Events) ->
+    Events2 = [{link_deleted, post_process_link_id(I)}
+               || I <- maps:keys(OldLinks)] ++ Events,
+    {Data#data{links = NewLinks}, Events2};
+update_links(Data, OldLinks, NewLinks, [Id | Rest], Events) ->
+    case get_link(Id) of
+        error -> throw({link_retrieval_error, Id});
+        {ok, Link} ->
+            Id2 = post_process_link_id(Id),
+            Link2 = post_process_link(Data, Link),
+            NewLinks2 = NewLinks#{Id => Link},
+            case maps:take(Id, OldLinks) of
+                error ->
+                    % New Link
+                    Events2 = [{link_added, Id2, Link2} | Events],
+                    update_links(Data, OldLinks, NewLinks2, Rest, Events2);
+                {Link, OldLinks2} ->
+                    % Link did not change
+                    update_links(Data, OldLinks2, NewLinks2, Rest, Events);
+                {_OldLink, OldLinks2} ->
+                    % Link changed
+                    Events2 = [{link_updated, Id2, Link2} | Events],
+                    update_links(Data, OldLinks2, NewLinks2, Rest, Events2)
+            end
+    end.
+
+post_process_device(#{device_id := Id, name := Name} = Device) ->
+    #{id => Name,
+      real_id => Id,
+      type => device_type(Device),
+      pcc_address => device_pcc_address(Device),
+      mpls_label => device_mpls_label(Device),
+      status => device_status(Device),
+      endpoints => device_endpoints(Device)}.
+
+device_type(#{device_type := Type}) ->
+    Type.
+
+device_status(#{device_operational_status := 'DEVICEOPERATIONALSTATUS_UNDEFINED'}) ->
+    undefined;
+device_status(#{device_operational_status := 'DEVICEOPERATIONALSTATUS_DISABLED'}) ->
+    disabled;
+device_status(#{device_operational_status := 'DEVICEOPERATIONALSTATUS_ENABLED'}) ->
+    enabled.
+
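+% The two accessors below read the optional <<"/te_data">> custom config rule
+% and fall back to `undefined` when the rule or the requested field is
+% missing, or when its value is not valid JSON.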
+device_mpls_label(Device) ->
+    try device_config_value(<<"/te_data">>, Device) of
+        Map when is_map(Map) -> maps:get(<<"mpls_label">>, Map, undefined);
+        _ -> undefined
+    catch error:badarg -> undefined
+    end.
+
+device_pcc_address(Device) ->
+    try device_config_value(<<"/te_data">>, Device) of
+        Map when is_map(Map) ->
+            case maps:get(<<"pcc_address">>, Map, undefined) of
+                undefined -> undefined;
+                AddressBin ->
+                    case inet_parse:address(binary_to_list(AddressBin)) of
+                        {ok, Address} -> Address;
+                        {error, einval} -> undefined
+                    end
+            end;
+        _ -> undefined
+    catch
+        error:badarg -> undefined
+    end.
+
+device_config_value(Key, #{device_config := Config}) ->
+    tfte_util:custom_config(Config, Key).
+
+device_endpoints(Device) ->
+    device_endpoints(Device, []).
+
+device_endpoints(#{device_endpoints := Endpoints}, Acc) ->
+    device_endpoints(Endpoints, Acc);
+device_endpoints([], Acc) ->
+    lists:reverse(Acc);
+device_endpoints([#{name := Name} | Rest], Acc) ->
+    device_endpoints(Rest, [Name | Acc]).
+
+post_process_link_id(#{link_uuid := #{uuid := Name}}) ->
+    Name.
+
+post_process_link(Data, Link) ->
+    #{id => link_id(Link),
+      endpoints => link_endpoints(Data, Link)}.
+
+link_id(#{link_id := Id}) ->
+    post_process_link_id(Id).
+
+link_endpoints(Data, Link) ->
+    link_endpoints(Data, Link, []).
+
+link_endpoints(Data, #{link_endpoint_ids := Endpoints}, Acc) ->
+    link_endpoints(Data, Endpoints, Acc);
+link_endpoints(_Data, [], Acc) ->
+    lists:reverse(Acc);
+link_endpoints(Data, [#{device_id := RealId,
+                        endpoint_uuid := #{uuid := EndpointName}} | Rest], Acc) ->
+    #data{names = Names} = Data,
+    Endpoint = #{
+        device => maps:get(RealId, Names, undefined),
+        endpoint => EndpointName
+    },
+    link_endpoints(Data, Rest, [Endpoint | Acc]).
+
+
+%-- GRPC UTILITY FUNCTIONS -----------------------------------------------------
+
+grpc_opts() ->
+    #{channel => context}.
+
+do_subscribe(UUID) ->
+    context_context_service_client:get_topology_events(UUID, grpc_opts()).
+
+get_object(UUID) ->
+    case context_context_service_client:get_topology(UUID, grpc_opts()) of
+        {error, Reason} -> 
+            ?LOG_ERROR("Local error while retrieving the topology object: ~p", [Reason]),
+            error;
+        {error, Reason, _Headers} ->
+            ?LOG_ERROR("Remote error while retrieving the topology object: ~p", [Reason]),
+            error;
+        {ok, Result, _Headers} ->
+            {ok, Result}
+    end.
+
+get_device(UUID) ->
+    case context_context_service_client:get_device(UUID, grpc_opts()) of
+        {error, Reason} -> 
+            ?LOG_ERROR("Local error while retrieving a device object: ~p", [Reason]),
+            error;
+        {error, Reason, _Headers} ->
+            ?LOG_ERROR("Remote error while retrieving a device object: ~p", [Reason]),
+            error;
+        {ok, Result, _Headers} ->
+            {ok, Result}
+    end.
+
+get_link(UUID) ->
+    case context_context_service_client:get_link(UUID, grpc_opts()) of
+        {error, Reason} -> 
+            ?LOG_ERROR("Local error while retrieving a link object: ~p", [Reason]),
+            error;
+        {error, Reason, _Headers} ->
+            ?LOG_ERROR("Remote error while retrieving a link object: ~p", [Reason]),
+            error;
+        {ok, Result, _Headers} ->
+            {ok, Result}
+    end.
diff --git a/src/te/apps/tfte/src/tfte_util.erl b/src/te/apps/tfte/src/tfte_util.erl
new file mode 100644
index 0000000000000000000000000000000000000000..fb058c7cf6b247c13bd95f2f0c5696eec30da8bf
--- /dev/null
+++ b/src/te/apps/tfte/src/tfte_util.erl
@@ -0,0 +1,43 @@
+%%%-----------------------------------------------------------------------------
+%% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%      http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%%-----------------------------------------------------------------------------
+
+-module(tfte_util).
+
+%%% INCLUDES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%% EXPORTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% API functions
+-export([custom_config/2]).
+
+
+%%% API FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+custom_config(#{config_rules := Rules}, Key) ->
+    custom_config(Rules, Key);
+custom_config([], _Key) ->
+    undefined;
+custom_config([#{action := 'CONFIGACTION_SET',
+                 config_rule := {custom, Rule}} | Rest], Key) ->
+    case Rule of
+        #{resource_key := Key, resource_value := Value} -> jsx:decode(Value);
+        _ -> custom_config(Rest, Key)
+    end;
+custom_config([_Rule | Rest], Key) ->
+    custom_config(Rest, Key).
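+
+% Usage sketch (editor's example, hypothetical rule data):
+%   Config = #{config_rules => [#{action => 'CONFIGACTION_SET',
+%                                 config_rule => {custom,
+%                                     #{resource_key => <<"/te_data">>,
+%                                       resource_value => <<"{\"mpls_label\":16010}">>}}}]},
+%   custom_config(Config, <<"/te_data">>) decodes the JSON value and, with
+%   jsx 3.x's default map decoding (as the callers in tfte_topology expect),
+%   returns #{<<"mpls_label">> => 16010}; any other key yields `undefined`.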
diff --git a/src/te/config/dev.config.template b/src/te/config/dev.config.template
new file mode 100644
index 0000000000000000000000000000000000000000..6e21dc0d2d021447cf5eb84869d97b7a3662f58c
--- /dev/null
+++ b/src/te/config/dev.config.template
@@ -0,0 +1,78 @@
+% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+% You may obtain a copy of the License at
+%
+%      http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+[
+    {tfte, [
+        {services, [
+            {te, [{http, "localhost", 10030, []}], #{}}
+        ]}
+    ]},
+
+    {pcep_server, [
+        {handler, {epce_pcep_server_handler, []}}
+    ]},
+
+    {epce, [
+    ]},
+
+    {grpcbox, [
+        {servers, [#{
+            grpc_opts => #{
+                service_protos => [te_pb, grpcbox_health_pb, grpcbox_reflection_pb],
+                %client_cert_dir => "",
+                services => #{
+                    'te.TEService' => tfte_te_service,
+                    'grpc.health.v1.Health' => grpcbox_health_service,
+                    'grpc.reflection.v1alpha.ServerReflection' => grpcbox_reflection_service
+                }
+            },
+            transport_opts => #{
+                ssl => false
+                %keyfile => "",
+                %certfile => "",
+                %cacertfile => ""
+            },
+            listen_opts => #{
+                port => 10030,
+                ip => {0,0,0,0}
+            },
+            pool_opts => #{
+                size => 10
+            },
+            server_opts => #{
+                header_table_size => 4096,
+                enable_push => 1,
+                max_concurrent_streams => unlimited,
+                initial_window_size => 65535,
+                max_frame_size => 16384,
+                max_header_list_size => unlimited
+            }
+        }]}
+    ]},
+
+    {kernel, [
+        {logger_level, debug},
+        {logger, [
+            {handler, default, logger_std_h, #{
+                level => debug,
+                filter_default => log,
+                config => #{type => standard_io},
+                formatter => {logger_formatter, #{
+                    legacy_header => false,
+                    single_line => true
+                }}
+            }}
+        ]}
+    ]}
+].
\ No newline at end of file
diff --git a/src/te/config/sys.config.src b/src/te/config/sys.config.src
new file mode 100644
index 0000000000000000000000000000000000000000..edcd4384a3236df42b1e530c8b3a92b96c80e09e
--- /dev/null
+++ b/src/te/config/sys.config.src
@@ -0,0 +1,101 @@
+% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+% You may obtain a copy of the License at
+%
+%      http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+[
+
+    {tfte, [
+        {context, <<"admin">>},
+        {topology, <<"admin">>},
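+        % The {env, VarName} tuples below are placeholders for the standard
+        % Kubernetes service discovery variables, presumably resolved when the
+        % gRPC client channels are set up.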
+        {services, [
+            {te, [
+                {http, {env, "TESERVICE_SERVICE_HOST"}, {env, "TESERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}},
+            {service, [
+                {http, {env, "SERVICESERVICE_SERVICE_HOST"}, {env, "SERVICESERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}},
+            {monitoring, [
+                {http, {env, "MONITORINGSERVICE_SERVICE_HOST"}, {env, "MONITORINGSERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}},
+            {compute, [
+                {http, {env, "COMPUTESERVICE_SERVICE_HOST"}, {env, "COMPUTESERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}},
+            {device, [
+                {http, {env, "DEVICESERVICE_SERVICE_HOST"}, {env, "DEVICESERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}},
+            {context, [
+                {http, {env, "CONTEXTSERVICE_SERVICE_HOST"}, {env, "CONTEXTSERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}},
+            {automation, [
+                {http, {env, "AUTOMATIONSERVICE_SERVICE_HOST"}, {env, "AUTOMATIONSERVICE_SERVICE_PORT_GRPC"}, []}
+            ], #{}}
+        ]}
+    ]},
+
+    {pcep_server, [
+        {handler, {epce_pcep_server_handler, []}}
+    ]},
+
+    {epce, [
+    ]},
+
+    {grpcbox, [
+        {servers, [#{
+            grpc_opts => #{
+                service_protos => [te_pb, grpcbox_health_pb, grpcbox_reflection_pb],
+                %client_cert_dir => "",
+                services => #{
+                    'te.TEService' => tfte_te_service,
+                    'grpc.health.v1.Health' => grpcbox_health_service,
+                    'grpc.reflection.v1alpha.ServerReflection' => grpcbox_reflection_service
+                }
+            },
+            transport_opts => #{
+                ssl => false
+                %keyfile => "",
+                %certfile => "",
+                %cacertfile => ""
+            },
+            listen_opts => #{
+                port => 10030,
+                ip => {0,0,0,0}
+            },
+            pool_opts => #{
+                size => 10
+            },
+            server_opts => #{
+                header_table_size => 4096,
+                enable_push => 1,
+                max_concurrent_streams => unlimited,
+                initial_window_size => 65535,
+                max_frame_size => 16384,
+                max_header_list_size => unlimited
+            }
+        }]}
+    ]},
+
+    {kernel, [
+        {logger_level, ${ERLANG_LOGGER_LEVEL}},
+        {logger, [
+            {handler, default, logger_std_h, #{
+                level => ${ERLANG_LOGGER_LEVEL},
+                filter_default => log,
+                config => #{type => standard_io},
+                formatter => {logger_formatter, #{
+                    legacy_header => false,
+                    single_line => true
+                }}
+            }}
+        ]}
+    ]}
+].
\ No newline at end of file
diff --git a/src/te/config/vm.args.src b/src/te/config/vm.args.src
new file mode 100644
index 0000000000000000000000000000000000000000..899705ce169a8302bdd201751342db58b5c85421
--- /dev/null
+++ b/src/te/config/vm.args.src
@@ -0,0 +1,4 @@
++C multi_time_warp
++sbwt none
+-name ${ERLANG_NODE_NAME}@${ERLANG_NODE_IP}
+-setcookie ${ERLANG_COOKIE}
diff --git a/src/te/rebar.config b/src/te/rebar.config
new file mode 100644
index 0000000000000000000000000000000000000000..01f7a899ee6fb69970bd38d6b75c47450887f313
--- /dev/null
+++ b/src/te/rebar.config
@@ -0,0 +1,54 @@
+% Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+%
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+% You may obtain a copy of the License at
+%
+%      http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+{erl_opts, [debug_info]}.
+
+{deps, [
+    grpcbox,
+    jsx,
+    {pcep_server, {git, "https://github.com/stritzinger/pcep_server.git", {branch, "master"}}}
+]}.
+
+{shell, [
+    {config, "config/dev.config"},
+    {apps, [epce, tfte, tfpb, grpcbox]}
+]}.
+
+{project_app_dirs, ["apps/*", "../../proto/src/erlang"]}.
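+% Note: "../../proto/src/erlang" makes the generated protobuf stubs (the tfpb
+% application included in the release below) visible as a project application.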
+
+{relx, [
+    {release, {tfte, "1.0.0"}, [
+        runtime_tools,
+        epce,
+        grpcbox,
+        jsx,
+        tfpb,
+        tfte
+    ]},
+    {vm_args_src, "config/vm.args.src"},
+    {sys_config_src, "config/sys.config.src"},
+    {dev_mode, true},
+    {include_erts, false},
+    {extended_start_script, true}
+]}.
+
+{profiles, [
+    {prod, [
+        {relx, [
+            {dev_mode, false},
+            {include_erts, true},
+            {include_src, false}
+        ]}
+    ]}
+]}.
diff --git a/src/te/rebar.lock b/src/te/rebar.lock
new file mode 100644
index 0000000000000000000000000000000000000000..c435b045661e26b84495269c3ba79406990d09fb
--- /dev/null
+++ b/src/te/rebar.lock
@@ -0,0 +1,41 @@
+{"1.2.0",
+[{<<"acceptor_pool">>,{pkg,<<"acceptor_pool">>,<<"1.0.0">>},1},
+ {<<"chatterbox">>,{pkg,<<"ts_chatterbox">>,<<"0.12.0">>},1},
+ {<<"codec_sequencer">>,
+  {git,"https://github.com/stritzinger/codec_sequencer.git",
+       {ref,"fc8760894f7962ef1497bf6ce4247eb75db9d5ca"}},
+  2},
+ {<<"ctx">>,{pkg,<<"ctx">>,<<"0.6.0">>},1},
+ {<<"gproc">>,{pkg,<<"gproc">>,<<"0.8.0">>},1},
+ {<<"grpcbox">>,{pkg,<<"grpcbox">>,<<"0.15.0">>},0},
+ {<<"hpack">>,{pkg,<<"hpack_erl">>,<<"0.2.3">>},2},
+ {<<"jsx">>,{pkg,<<"jsx">>,<<"3.1.0">>},0},
+ {<<"pcep_codec">>,
+  {git,"https://github.com/stritzinger/pcep_codec.git",
+       {ref,"ca5eb0822d9971ec4bcfb427a49b2e516081a126"}},
+  1},
+ {<<"pcep_server">>,
+  {git,"https://github.com/stritzinger/pcep_server.git",
+       {ref,"ea751fa807f4c1f5635f781431fe384610166b0a"}},
+  0},
+ {<<"ranch">>,{pkg,<<"ranch">>,<<"2.0.0">>},1}]}.
+[
+{pkg_hash,[
+ {<<"acceptor_pool">>, <<"43C20D2ACAE35F0C2BCD64F9D2BDE267E459F0F3FD23DAB26485BF518C281B21">>},
+ {<<"chatterbox">>, <<"4E54F199E15C0320B85372A24E35554A2CCFC4342E0B7CD8DAED9A04F9B8EF4A">>},
+ {<<"ctx">>, <<"8FF88B70E6400C4DF90142E7F130625B82086077A45364A78D208ED3ED53C7FE">>},
+ {<<"gproc">>, <<"CEA02C578589C61E5341FCE149EA36CCEF236CC2ECAC8691FBA408E7EA77EC2F">>},
+ {<<"grpcbox">>, <<"97C7126296A091602D372EBF5860A04F7BC795B45B33A984CAD2B8E362774FD8">>},
+ {<<"hpack">>, <<"17670F83FF984AE6CD74B1C456EDDE906D27FF013740EE4D9EFAA4F1BF999633">>},
+ {<<"jsx">>, <<"D12516BAA0BB23A59BB35DCCAF02A1BD08243FCBB9EFE24F2D9D056CCFF71268">>},
+ {<<"ranch">>, <<"FBF3D79661C071543256F9051CAF19D65DAA6DF1CF6824D8F37A49B19A66F703">>}]},
+{pkg_hash_ext,[
+ {<<"acceptor_pool">>, <<"0CBCD83FDC8B9AD2EEE2067EF8B91A14858A5883CB7CD800E6FCD5803E158788">>},
+ {<<"chatterbox">>, <<"6478C161BC60244F41CD5847CC3ACCD26D997883E9F7FACD36FF24533B2FA579">>},
+ {<<"ctx">>, <<"A14ED2D1B67723DBEBBE423B28D7615EB0BDCBA6FF28F2D1F1B0A7E1D4AA5FC2">>},
+ {<<"gproc">>, <<"580ADAFA56463B75263EF5A5DF4C86AF321F68694E7786CB057FD805D1E2A7DE">>},
+ {<<"grpcbox">>, <<"161ABE9E17E7D1982EFA6488ADEAA13C3E847A07984A6E6B224E553368918647">>},
+ {<<"hpack">>, <<"06F580167C4B8B8A6429040DF36CC93BBA6D571FAEAEC1B28816523379CBB23A">>},
+ {<<"jsx">>, <<"0C5CC8FDC11B53CC25CF65AC6705AD39E54ECC56D1C22E4ADB8F5A53FB9427F3">>},
+ {<<"ranch">>, <<"C20A4840C7D6623C19812D3A7C828B2F1BD153EF0F124CB69C54FE51D8A42AE0">>}]}
+].
diff --git a/src/te/tests/deploy_specs.sh b/src/te/tests/deploy_specs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..818fb2b0d69ae63b197a83683e68aed96e50d6e2
--- /dev/null
+++ b/src/te/tests/deploy_specs.sh
@@ -0,0 +1,147 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+export TFS_COMPONENTS="${TFS_COMPONENTS} te"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set the database name to be used by Context.
+export CRDB_DATABASE="tfs"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Set to "YES" to drop the database if it already exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Set the external port NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the external port QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Set to "YES" to drop the tables if they already exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090"
+
+# Set the external port Grafana HTTP Dashboards will be exposed to.
+export GRAF_EXT_PORT_HTTP="3000"
diff --git a/src/te/tests/netgen-config.yml b/src/te/tests/netgen-config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d037088ce6cafdf95110ce5eadd31e8fb7a302ed
--- /dev/null
+++ b/src/te/tests/netgen-config.yml
@@ -0,0 +1,115 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Directory used for Netgen's operation.
+# Default: "/tmp/netgen"
+# netgen_runstatedir:
+
+# Clean exit.
+# Default: 'false'
+# clean_exit:
+
+# Valgrind parameters.
+# Default: "--tool=memcheck"
+# valgrind_params: "--tool=memcheck --leak-check=full --trace-children=yes"
+# valgrind_params: "--tool=memcheck --leak-check=full"
+# valgrind_params: "--tool=memcheck --leak-check=full --show-leak-kinds=all"
+# valgrind_params: "--tool=callgrind --dump-instr=yes --collect-jumps=yes"
+
+# Perf directory
+# Default: [netgen_runstatedir]/perf
+# perf_dir:
+
+# Plugins configuration.
+plugins:
+  frr:
+    # FRR's sysconfdir (--sysconfdir).
+    # Default: "/etc/frr"
+    # sysconfdir:
+
+    # FRR's localstatedir (--localstatedir).
+    # Default: "/var/run/frr"
+    # localstatedir:
+
+    # FRR's user (--enable-user).
+    # Default: "frr"
+    # user:
+    user: "root"
+
+    # FRR's group (--enable-group).
+    # Default: "frr"
+    # group:
+    group: "root"
+
+    # Directory to store FRR logs.
+    # Default: [netgen_runstatedir]/frrlogs
+    # logdir:
+
+  tcpdump:
+    # Directory to store tcpdump captures.
+    # Default: [netgen_runstatedir]/pcaps
+    # pcap_dir:
+
+    # Filter on which nodes tcpdump should run.
+    # Default: []
+    # whitelist:
+
+    # Filter on which nodes tcpdump should not run.
+    # Default: []
+    # blacklist:
+
+  tmux:
+    # Path of tmux script used to open a shell on all routers.
+    # Default: [netgen_runstatedir]/tmux.sh
+    # file:
+
+    # Panels per node.
+    # Default: 1
+    # panels-per-node:
+
+  bird:
+    # BIRD's sysconfdir (--sysconfdir).
+    # Default: "/etc/bird"
+    # sysconfdir:
+
+    # BIRD's localstatedir (--localstatedir).
+    # Default: "/var/run/bird"
+    # localstatedir:
+
+    # BIRD's user (--enable-user).
+    # Default: "bird"
+    # user:
+
+    # BIRD's group (--enable-group).
+    # Default: "bird"
+    # group:
+
+    # Directory to store BIRD logs.
+    # Default: [netgen_runstatedir]/birdlogs
+    # logdir:
+
+  bgpsimple:
+    # Path to bgp_simple script
+    # Default: "bgp_simple.pl"
+    # path:
+
+  iou:
+    # IOU working directory.
+    # Default: [netgen_runstatedir]/iou
+    # dir:
+
+  dynamips:
+    # dynamips working directory.
+    # Default: [netgen_runstatedir]/dynamips
+    # dir:
diff --git a/src/te/tests/netgen-topology.yml.template b/src/te/tests/netgen-topology.yml.template
new file mode 100644
index 0000000000000000000000000000000000000000..fd21c436324f03e59e37b7cb5cd829245782e133
--- /dev/null
+++ b/src/te/tests/netgen-topology.yml.template
@@ -0,0 +1,548 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+#                          +---------+
+#                          |         |
+#                          |   SRC   |
+#                          | 9.9.9.1 |
+#                          |         |
+#                          +---------+
+#                               |eth-rt1 (.1)
+#                               |
+#                               |10.0.10.0/24
+#                               |
+#                               |eth-src (.2)
+#                          +---------+                      .
+#                          |         |                      .
+#                          |   RT1   |eth-rt1-pce (???)     .
+#                          | 1.1.1.1 +----------------------------------+
+#                          |  16010  |                      .       ??? |
+#                          +---------+                      .           |
+#                               |eth-sw1                    .           |
+#                               |                           .           |
+#                               |                           .           |
+#                               |                           .           |
+#          +---------+          |          +---------+      .           |
+#          |         |          |          |         |      .           |
+#          |   RT2   |eth-sw1   |   eth-sw1|   RT3   |      .           |
+#          | 2.2.2.2 +----------+----------+ 3.3.3.3 |      .           |
+#          |  16020  |     10.0.1.0/24     |  16030  |      .           |
+#          +---------+                     +---------+      .eth-pce-rt1|(???)
+#     eth-rt4-1|  |eth-rt4-2          eth-rt5-1|  |eth-rt5-2.      +----+----+
+#              |  |                            |  |         .      |         |
+#   10.0.2.0/24|  |10.0.3.0/24      10.0.4.0/24|  |10.0.5.0/24     |   PCE   |
+#              |  |                            |  |         .      |   ????  |
+#     eth-rt2-1|  |eth-rt2-2          eth-rt3-1|  |eth-rt3-2.      |         |
+#          +---------+                     +---------+      .      +----+----+
+#          |         |                     |         |      .eth-pce-rt6|(???)
+#          |   RT4   |     10.0.6.0/24     |   RT5   |      .           |
+#          | 4.4.4.4 +---------------------+ 5.5.5.5 |      .           |
+#          |  16040  |eth-rt5       eth-rt4|  16050  |      .           |
+#          +---------+                     +---------+      .           |
+#        eth-rt6|                                |eth-rt6   .           |
+#               |                                |          .           |
+#    10.0.7.0/24|                                |10.0.8.0/24           |
+#               |          +---------+           |          .           |
+#               +----------|         |-----------+          .           |
+#                   eth-rt4|   RT6   |eth-rt5               .           |
+#                          | 6.6.6.6 |eth-rt6-pce (????)    .      ???? |
+#                          |  16060  +----------------------------------+
+#                          +---------+                      .
+#                               |eth-dst (.1)               .
+#                               |                           .
+#                               |10.0.11.0/24
+#                               |
+#                               |eth-rt6 (.2)
+#                          +---------+
+#                          |         |
+#                          |   DST   |
+#                          | 9.9.9.2 |
+#                          |         |
+#                          +---------+
+#
+
+---
+
+routers:
+
+  src:
+    links:
+      lo:
+        ipv4: 9.9.9.1/32
+        ipv6: 2001:db8:1066::1/128
+        mpls: yes
+      eth-rt1:
+        peer: [rt1, eth-src]
+        ipv4: 10.0.10.1/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+    shell: |
+      ip route add 9.9.9.2/32 encap mpls 1111 via inet 10.0.10.2 src 9.9.9.1
+
+  rt1:
+    links:
+      lo:
+        ipv4: 1.1.1.1/32
+        mpls: yes
+      eth-sw1:
+        peer: [sw1, sw1-rt1]
+        ipv4: 10.0.1.1/24
+        mpls: yes
+      eth-src:
+        peer: [src, eth-rt1]
+        ipv4: 10.0.10.2/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+      pathd:
+        args: "-M pathd_pcep"
+        config: |
+          debug pathd pcep basic
+          segment-routing
+           traffic-eng
+            pcep
+             pce-config CONFIG
+              source-address ip 1.1.1.1
+             pce PCE
+              pce-initiated
+              address ip ${PCE_IP}
+              config CONFIG
+             pcc
+              peer PCE
+          !
+      isisd:
+        run: yes
+        config: |
+          interface lo
+           ip router isis 1
+           ipv6 router isis 1
+           isis passive
+          !
+          interface eth-sw1
+           ip router isis 1
+           ipv6 router isis 1
+           isis hello-multiplier 3
+          !
+          router isis 1
+           net 49.0000.0000.0000.0001.00
+           is-type level-1
+           redistribute ipv4 static level-1
+           redistribute ipv4 connected level-1
+           topology ipv6-unicast
+           segment-routing on
+           segment-routing global-block 16000 23999
+           segment-routing node-msd 8
+           segment-routing prefix 1.1.1.1/32 index 10 explicit-null
+           segment-routing prefix 2001:db8:1000::1/128 index 11 explicit-null
+          !
+    shell: |
+      ip route add 9.9.9.1/32 dev eth-src
+      ip link add eth-rt1-pce type veth peer name eth-pce-rt1
+      ip addr add ${RT1_PCE_INT_IF_IP}/24 dev eth-rt1-pce
+      ip link set eth-pce-rt1 netns ${PCE_NETNS}
+      ip -n ${PCE_NETNS} addr add ${RT1_PCE_EXT_IF_IP}/24 dev eth-pce-rt1
+      ip link set eth-rt1-pce up
+      ip -n ${PCE_NETNS} link set eth-pce-rt1 up
+      ip route add ${RT1_PCE_EXT_IF_IP}/24 via ${RT1_PCE_INT_IF_IP} dev eth-rt1-pce src 1.1.1.1
+      ip -n ${PCE_NETNS} route add ${RT1_PCE_INT_IF_IP}/32 via ${RT1_PCE_EXT_IF_IP} dev eth-pce-rt1
+      ip -n ${PCE_NETNS} route add 1.1.1.1/32 via ${RT1_PCE_EXT_IF_IP} dev eth-pce-rt1
+
+  rt2:
+    links:
+      lo:
+        ipv4: 2.2.2.2/32
+        ipv6: 2001:db8:1000::2/128
+        mpls: yes
+      eth-sw1:
+        peer: [sw1, sw1-rt2]
+        ipv4: 10.0.1.2/24
+        mpls: yes
+      eth-rt4-1:
+        peer: [rt4, eth-rt2-1]
+        ipv4: 10.0.2.2/24
+        mpls: yes
+      eth-rt4-2:
+        peer: [rt4, eth-rt2-2]
+        ipv4: 10.0.3.2/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+      isisd:
+        run: yes
+        config: |
+          interface lo
+           ip router isis 1
+           ipv6 router isis 1
+           isis passive
+          !
+          interface eth-sw1
+           ip router isis 1
+           ipv6 router isis 1
+           isis hello-multiplier 3
+          !
+          interface eth-rt4-1
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt4-2
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          router isis 1
+           net 49.0000.0000.0000.0002.00
+           is-type level-1
+           topology ipv6-unicast
+           segment-routing on
+           segment-routing global-block 16000 23999
+           segment-routing node-msd 8
+           segment-routing prefix 2.2.2.2/32 index 20 no-php-flag
+           segment-routing prefix 2001:db8:1000::2/128 index 21 no-php-flag
+          !
+
+  rt3:
+    links:
+      lo:
+        ipv4: 3.3.3.3/32
+        ipv6: 2001:db8:1000::3/128
+        mpls: yes
+      eth-sw1:
+        peer: [sw1, sw1-rt3]
+        ipv4: 10.0.1.3/24
+        mpls: yes
+      eth-rt5-1:
+        peer: [rt5, eth-rt3-1]
+        ipv4: 10.0.4.3/24
+        mpls: yes
+      eth-rt5-2:
+        peer: [rt5, eth-rt3-2]
+        ipv4: 10.0.5.3/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+      isisd:
+        run: yes
+        config: |
+          interface lo
+           ip router isis 1
+           ipv6 router isis 1
+           isis passive
+          !
+          interface eth-sw1
+           ip router isis 1
+           ipv6 router isis 1
+           isis hello-multiplier 3
+          !
+          interface eth-rt5-1
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt5-2
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          router isis 1
+           net 49.0000.0000.0000.0003.00
+           is-type level-1
+           topology ipv6-unicast
+           segment-routing on
+           segment-routing global-block 16000 23999
+           segment-routing node-msd 8
+           segment-routing prefix 3.3.3.3/32 index 30 no-php-flag
+           segment-routing prefix 2001:db8:1000::3/128 index 31 no-php-flag
+          !
+
+  rt4:
+    links:
+      lo:
+        ipv4: 4.4.4.4/32
+        ipv6: 2001:db8:1000::4/128
+        mpls: yes
+      eth-rt2-1:
+        peer: [rt2, eth-rt4-1]
+        ipv4: 10.0.2.4/24
+        mpls: yes
+      eth-rt2-2:
+        peer: [rt2, eth-rt4-2]
+        ipv4: 10.0.3.4/24
+        mpls: yes
+      eth-rt5:
+        peer: [rt5, eth-rt4]
+        ipv4: 10.0.6.4/24
+        mpls: yes
+      eth-rt6:
+        peer: [rt6, eth-rt4]
+        ipv4: 10.0.7.4/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+      isisd:
+        run: yes
+        config: |
+          interface lo
+           ip router isis 1
+           ipv6 router isis 1
+           isis passive
+          !
+          interface eth-rt2-1
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt2-2
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt5
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt6
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          router isis 1
+           net 49.0000.0000.0000.0004.00
+           is-type level-1
+           topology ipv6-unicast
+           segment-routing on
+           segment-routing global-block 16000 23999
+           segment-routing node-msd 8
+           segment-routing prefix 4.4.4.4/32 index 40 no-php-flag
+           segment-routing prefix 2001:db8:1000::4/128 index 41 no-php-flag
+          !
+
+  rt5:
+    links:
+      lo:
+        ipv4: 5.5.5.5/32
+        ipv6: 2001:db8:1000::5/128
+        mpls: yes
+      eth-rt3-1:
+        peer: [rt3, eth-rt5-1]
+        ipv4: 10.0.4.5/24
+        mpls: yes
+      eth-rt3-2:
+        peer: [rt3, eth-rt5-2]
+        ipv4: 10.0.5.5/24
+        mpls: yes
+      eth-rt4:
+        peer: [rt4, eth-rt5]
+        ipv4: 10.0.6.5/24
+        mpls: yes
+      eth-rt6:
+        peer: [rt6, eth-rt5]
+        ipv4: 10.0.8.5/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+      isisd:
+        run: yes
+        config: |
+          interface lo
+           ip router isis 1
+           ipv6 router isis 1
+           isis passive
+          !
+          interface eth-rt3-1
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt3-2
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt4
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt6
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          router isis 1
+           net 49.0000.0000.0000.0005.00
+           is-type level-1
+           topology ipv6-unicast
+           segment-routing on
+           segment-routing global-block 16000 23999
+           segment-routing node-msd 8
+           segment-routing prefix 5.5.5.5/32 index 50 no-php-flag
+           segment-routing prefix 2001:db8:1000::5/128 index 51 no-php-flag
+          !
+
+  rt6:
+    links:
+      lo:
+        ipv4: 6.6.6.6/32
+        ipv6: 2001:db8:1000::6/128
+        mpls: yes
+      eth-rt4:
+        peer: [rt4, eth-rt6]
+        ipv4: 10.0.7.6/24
+        mpls: yes
+      eth-rt5:
+        peer: [rt5, eth-rt6]
+        ipv4: 10.0.8.6/24
+        mpls: yes
+      eth-dst:
+        peer: [dst, eth-rt6]
+        ipv4: 10.0.11.1/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+      pathd:
+        args: "-M pathd_pcep"
+        config: |
+          debug pathd pcep
+          segment-routing
+           traffic-eng
+            pcep
+             pce-config CONFIG
+              source-address ip 6.6.6.6
+             pce PCE
+              pce-initiated
+              address ip ${PCE_IP}
+              config CONFIG
+             pcc
+              peer PCE
+          !
+      isisd:
+        run: yes
+        config: |
+          interface lo
+           ip router isis 1
+           ipv6 router isis 1
+           isis passive
+          !
+          interface eth-rt4
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          interface eth-rt5
+           ip router isis 1
+           ipv6 router isis 1
+           isis network point-to-point
+           isis hello-multiplier 3
+          !
+          router isis 1
+           net 49.0000.0000.0000.0006.00
+           is-type level-1
+           redistribute ipv4 static level-1
+           redistribute ipv4 connected level-1
+           topology ipv6-unicast
+           segment-routing on
+           segment-routing global-block 16000 23999
+           segment-routing node-msd 8
+           segment-routing prefix 6.6.6.6/32 index 60 explicit-null
+           segment-routing prefix 2001:db8:1000::6/128 index 61 explicit-null
+          !
+    shell: |
+      ip route add 9.9.9.2/32 dev eth-dst
+      ip link add eth-rt6-pce type veth peer name eth-pce-rt6
+      ip addr add ${RT6_PCE_INT_IF_IP}/24 dev eth-rt6-pce
+      ip link set eth-pce-rt6 netns ${PCE_NETNS}
+      ip -n ${PCE_NETNS} addr add ${RT6_PCE_EXT_IF_IP}/24 dev eth-pce-rt6
+      ip link set eth-rt6-pce up
+      ip -n ${PCE_NETNS} link set eth-pce-rt6 up
+      ip route add ${RT6_PCE_EXT_IF_IP}/24 via ${RT6_PCE_INT_IF_IP} dev eth-rt6-pce src 6.6.6.6
+      ip -n ${PCE_NETNS} route add ${RT6_PCE_INT_IF_IP}/32 via ${RT6_PCE_EXT_IF_IP} dev eth-pce-rt6
+      ip -n ${PCE_NETNS} route add 6.6.6.6/32 via ${RT6_PCE_EXT_IF_IP} dev eth-pce-rt6
+
+  dst:
+    links:
+      lo:
+        ipv4: 9.9.9.2/32
+        ipv6: 2001:db8:1066::2/128
+        mpls: yes
+      eth-rt6:
+        peer: [rt6, eth-dst]
+        ipv4: 10.0.11.2/24
+        mpls: yes
+    frr:
+      zebra:
+        run: yes
+        config:
+    shell: |
+      ip route add 9.9.9.1/32 encap mpls 6666 via inet 10.0.11.1
+
+switches:
+  sw1:
+    links:
+      sw1-rt1:
+        peer: [rt1, rt1-sw1]
+      sw1-rt2:
+        peer: [rt2, rt2-sw1]
+      sw1-rt3:
+        peer: [rt3, rt3-sw1]
+
+frr:
+  #perf: yes
+  #valgrind: yes
+  base-configs:
+    all: |
+      hostname %(node)
+      password 1
+      log file %(logdir)/%(node)-%(daemon).log
+      log commands
+    zebra: |
+      debug zebra kernel
+      debug zebra packet
+      debug zebra mpls
+    isisd: |
+      debug isis events
+      debug isis route-events
+      debug isis spf-events
+      debug isis sr-events
+      debug isis lsp-gen
diff --git a/src/te/tests/service-descriptors.json b/src/te/tests/service-descriptors.json
new file mode 100644
index 0000000000000000000000000000000000000000..15023ac9da8ff443bad6274af9de8246db524358
--- /dev/null
+++ b/src/te/tests/service-descriptors.json
@@ -0,0 +1,24 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}},
+                "service_uuid": {"uuid": "2c025055-bf6c-4250-8560-cf62f2d29e72"}
+            },
+            "service_type": 4, "service_status": {"service_status": 1},
+            "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid":"RT1"}}, "endpoint_uuid": {"uuid":"eth-src"}},
+                {"device_id": {"device_uuid": {"uuid":"RT6"}}, "endpoint_uuid": {"uuid":"eth-dst"}}
+            ],
+            "service_constraints": [],
+            "service_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "/lsp-fw", "resource_value": {
+                    "symbolic_name": "foo", "binding_label": 1111
+                }}},
+                {"action": 1, "custom": {"resource_key": "/lsp-bw", "resource_value": {
+                    "symbolic_name": "bar", "binding_label": 6666
+                }}}
+            ]}
+        }
+    ]
+}
diff --git a/src/te/tests/start-testbed.sh b/src/te/tests/start-testbed.sh
new file mode 100755
index 0000000000000000000000000000000000000000..07a30e091252f753ff3c89c65378c6bbcde8bae3
--- /dev/null
+++ b/src/te/tests/start-testbed.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+ROOTDIR="$( cd "$( dirname "$0" )"; pwd )"
+RUNDIR="$( pwd )"
+NETGENDIR="${RUNDIR}/netgen"
+
+if [[ ! -f "${NETGENDIR}/exe/netgen" ]]; then
+    echo "Failed to find Netgen binary at ${NETGENDIR}/exe/netgen"
+    exit 1
+fi
+
+PCE_IP=$( kubectl --namespace tfs get $(kubectl --namespace tfs get pods --selector=app=teservice -o name) --template '{{.status.podIP}}' )
+echo "Teraflow PCE IP address: $PCE_IP"
+NAMESPACES=$( ip netns list | cut -d' ' -f1 )
+PCE_NETNS=""
+for n in $NAMESPACES; do
+    if sudo ip -n "$n" addr list | grep -qF "$PCE_IP"; then
+        echo "Teraflow TE service namespace: $n"
+        PCE_NETNS=$n
+        break
+    fi
+done
+if [[ -z $PCE_NETNS ]]; then
+    echo "Teraflow network namespace for TE service not found"
+    exit 1
+fi
+
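+# Derive the addresses for the veth pairs linking RT1/RT6 to the PCE from the
+# TE service pod's IP: reuse its /24 prefix and pick the .10-.13 host addresses.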
+IFS=. read PCE_IP1 PCE_IP2 PCE_IP3 PCE_IP4 <<< "$PCE_IP"
+
+export PCE_IP
+export PCE_NETNS
+export RT1_PCE_INT_IF_IP="$PCE_IP1.$PCE_IP2.$PCE_IP3.10"
+export RT1_PCE_EXT_IF_IP="$PCE_IP1.$PCE_IP2.$PCE_IP3.11"
+export RT6_PCE_INT_IF_IP="$PCE_IP1.$PCE_IP2.$PCE_IP3.12"
+export RT6_PCE_EXT_IF_IP="$PCE_IP1.$PCE_IP2.$PCE_IP3.13"
+
+cp "${ROOTDIR}/netgen-config.yml" "${RUNDIR}/config.yml"
+envsubst < "${ROOTDIR}/netgen-topology.yml.template" > "${RUNDIR}/topology.yml"
+
+sudo -i bash -c "\
+    cd ${RUNDIR}/netgen;\
+    sysctl -w net.ipv4.conf.all.rp_filter=0;\
+    PATH=/usr/lib/frr:\$PATH ./exe/netgen ../topology.yml -c ../config.yml"
diff --git a/src/te/tests/test_te_service.py b/src/te/tests/test_te_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..6237049d4ed7c69f2a6f12efc1ce0365fca7133b
--- /dev/null
+++ b/src/te/tests/test_te_service.py
@@ -0,0 +1,108 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Simple script to test GRPC calls to the TE service.
+# First get the TE service IP using:
+# > kubectl -n tfs get services
+# Set it as the host in main() below, then run with:
+# > PYTHONPATH=./src python test_te_service.py
+
+import json, sys
+from common.proto.context_pb2 import ConfigActionEnum, Service, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from service.client.TEServiceClient import TEServiceClient
+
+#  {"name": "", 
+#     "service_config": {
+#        "config_rules": [
+#           {
+#             "action": "CONFIGACTION_SET",
+#              "custom": {
+#                 "resource_key": "/lsp-fw",
+#                 "resource_value": "{\n\"binding_label\": 1111,\n\"symbolic_name\": \"foo\"\n}"}},
+#           {
+#              "action": "CONFIGACTION_SET",
+#              "custom": {
+#                 "resource_key": "/lsp-bw",
+#                 "resource_value": "{\n\"binding_label\": 6666,\n\"symbolic_name\": \"bar\"\n}"}}]},
+#        "service_constraints": [],
+#        "service_endpoint_ids": [
+#           {"device_id": {"device_uuid": {"uuid": "RT1"}}, "endpoint_uuid": {"uuid": "eth-src"}},
+#           {"device_id": {"device_uuid": {"uuid": "RT6"}}, "endpoint_uuid": {"uuid": "eth-dst"}}],
+#        "service_id": {"context_id": {"context_uuid": {"uuid": "admin"}},
+#        "service_uuid": {"uuid": "2c025055-bf6c-4250-8560-cf62f2d29e72"}},
+#        "service_status": {"service_status": "SERVICESTATUS_PLANNED"},
+#        "service_type": "SERVICETYPE_TE"}
+
+service = Service()
+service.service_id.context_id.context_uuid.uuid = 'admin'
+service.service_id.service_uuid.uuid = 'test-te-service'
+
+service.service_type = ServiceTypeEnum.SERVICETYPE_TE
+service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
+
+# SRC Endpoint:
+src_endpoint_id = service.service_endpoint_ids.add()
+src_endpoint_id.device_id.device_uuid.uuid = 'RT1'
+src_endpoint_id.endpoint_uuid.uuid = 'eth-src'
+
+# DST Endpoint:
+dst_endpoint_id = service.service_endpoint_ids.add()
+dst_endpoint_id.device_id.device_uuid.uuid = 'RT6'
+dst_endpoint_id.endpoint_uuid.uuid = 'eth-dst'
+
+# # Capacity SLA
+# sla_capacity = service.service_constraints.add()
+# sla_capacity.sla_capacity.capacity_gbps = 10.0
+
+# # Latency SLA
+# sla_latency = service.service_constraints.add()
+# sla_latency.sla_latency.e2e_latency_ms = 20.0
+
+# Example config rules:
+config_rule_1 = service.service_config.config_rules.add()
+config_rule_1.action = ConfigActionEnum.CONFIGACTION_SET
+config_rule_1.custom.resource_key = '/lsp-fw'
+config_rule_1.custom.resource_value = json.dumps({
+    'binding_label': 1111, 'symbolic_name': "foo"
+})
+
+config_rule_2 = service.service_config.config_rules.add()
+config_rule_2.action = ConfigActionEnum.CONFIGACTION_SET
+config_rule_2.custom.resource_key = '/lsp-bw'
+config_rule_2.custom.resource_value = json.dumps({
+    'binding_label': 6666, 'symbolic_name': "bar"
+})
+
+def main():
+    # Connect:
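+    # Replace the host with the TE service IP (see 'kubectl -n tfs get services'):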
+    te_service_client = TEServiceClient(host='XXX.XXX.XXX.XXX', port=10030)
+
+    # RequestLSP
+    print('request:', grpc_message_to_json_string(service))
+    service_status = te_service_client.RequestLSP(service)
+    print('response:', grpc_message_to_json_string(service_status))
+
+    # DeleteLSP
+    #print('request:', grpc_message_to_json_string(service))
+    #service_status = te_service_client.DeleteLSP(service)
+    #print('response:', grpc_message_to_json_string(service_status))
+
+    # Close:
+    te_service_client.close()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/te/tests/topology-descriptors.json b/src/te/tests/topology-descriptors.json
new file mode 100644
index 0000000000000000000000000000000000000000..a34d8ce09b3367bce0943ef3de8565baec776842
--- /dev/null
+++ b/src/te/tests/topology-descriptors.json
@@ -0,0 +1,195 @@
+{
+    "contexts": [
+        {
+            "context_id": {"context_uuid": {"uuid": "admin"}},
+            "topology_ids": [],
+            "service_ids": []
+        }
+    ],
+    "topologies": [
+        {
+            "topology_id": {"topology_uuid": {"uuid": "admin"}, "context_id": {"context_uuid": {"uuid": "admin"}}},
+            "device_ids": [
+                {"device_uuid": {"uuid": "SW1"}},
+                {"device_uuid": {"uuid": "RT1"}},
+                {"device_uuid": {"uuid": "RT2"}},
+                {"device_uuid": {"uuid": "RT3"}},
+                {"device_uuid": {"uuid": "RT4"}},
+                {"device_uuid": {"uuid": "RT5"}},
+                {"device_uuid": {"uuid": "RT6"}}
+            ],
+            "link_ids": [
+                {"link_uuid": {"uuid": "RT1/SW1"}},
+                {"link_uuid": {"uuid": "RT2/SW1"}},
+                {"link_uuid": {"uuid": "RT3/SW1"}},
+                {"link_uuid": {"uuid": "RT2/RT4/1"}},
+                {"link_uuid": {"uuid": "RT2/RT4/2"}},
+                {"link_uuid": {"uuid": "RT3/RT5/1"}},
+                {"link_uuid": {"uuid": "RT3/RT5/2"}},
+                {"link_uuid": {"uuid": "RT4/RT5"}},
+                {"link_uuid": {"uuid": "RT4/RT6"}},
+                {"link_uuid": {"uuid": "RT5/RT6"}}
+            ]
+        }
+    ],
+    "devices": [
+        {
+            "device_id": {"device_uuid": {"uuid": "SW1"}}, "device_type": "emu-packet-switch",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "df8bb169-2013-4b82-9455-69777f7a01d6"},
+                    {"sample_types": [], "type": "copper", "uuid": "061119c1-2aa4-48e9-be64-3ddf465fc80a"},
+                    {"sample_types": [], "type": "copper", "uuid": "495ea3f8-e67f-46a0-84bd-a230a4b7067d"}
+                ]}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "RT1"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "1.1.1.1"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "eth-src"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-sw1"}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/te_data", "resource_value": {"mpls_label": 16010, "pcc_address": "1.1.1.1"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "RT2"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "2.2.2.2"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "eth-sw1"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt4-1"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt4-2"}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/te_data", "resource_value": {"mpls_label": 16020, "pcc_address": "2.2.2.2"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "RT3"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "3.3.3.3"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "eth-sw1"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt5-1"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt5-2"}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/te_data", "resource_value": {"mpls_label": 16030, "pcc_address": "3.3.3.3"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "RT4"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "4.4.4.4"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt2-1"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt2-2"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt5"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt6"}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/te_data", "resource_value": {"mpls_label": 16040, "pcc_address": "4.4.4.4"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "RT5"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "5.5.5.5"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt3-1"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt3-2"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt4"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt6"}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/te_data", "resource_value": {"mpls_label": 16050, "pcc_address": "5.5.5.5"}}}
+            ]}
+        },
+        {
+            "device_id": {"device_uuid": {"uuid": "RT6"}}, "device_type": "emu-packet-router",
+            "device_operational_status": 2, "device_drivers": [0], "device_endpoints": [],
+            "device_config": {"config_rules": [
+                {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "6.6.6.6"}},
+                {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}},
+                {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt4"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-rt5"},
+                    {"sample_types": [], "type": "copper", "uuid": "eth-dst"}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/te_data", "resource_value": {"mpls_label": 16060, "pcc_address": "6.6.6.6"}}}
+            ]}
+        }
+    ],
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "RT1/SW1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT1"}}, "endpoint_uuid": {"uuid": "eth-sw1"}},
+                {"device_id": {"device_uuid": {"uuid": "SW1"}}, "endpoint_uuid": {"uuid": "df8bb169-2013-4b82-9455-69777f7a01d6"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT2/SW1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT2"}}, "endpoint_uuid": {"uuid": "eth-sw1"}},
+                {"device_id": {"device_uuid": {"uuid": "SW1"}}, "endpoint_uuid": {"uuid": "061119c1-2aa4-48e9-be64-3ddf465fc80a"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT3/SW1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT3"}}, "endpoint_uuid": {"uuid": "eth-sw1"}},
+                {"device_id": {"device_uuid": {"uuid": "SW1"}}, "endpoint_uuid": {"uuid": "495ea3f8-e67f-46a0-84bd-a230a4b7067d"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT2/RT4/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT2"}}, "endpoint_uuid": {"uuid": "eth-rt4-1"}},
+                {"device_id": {"device_uuid": {"uuid": "RT4"}}, "endpoint_uuid": {"uuid": "eth-rt2-1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT2/RT4/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT2"}}, "endpoint_uuid": {"uuid": "eth-rt4-2"}},
+                {"device_id": {"device_uuid": {"uuid": "RT4"}}, "endpoint_uuid": {"uuid": "eth-rt2-2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT3/RT5/1"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT3"}}, "endpoint_uuid": {"uuid": "eth-rt5-1"}},
+                {"device_id": {"device_uuid": {"uuid": "RT5"}}, "endpoint_uuid": {"uuid": "eth-rt3-1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT3/RT5/2"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT3"}}, "endpoint_uuid": {"uuid": "eth-rt5-2"}},
+                {"device_id": {"device_uuid": {"uuid": "RT5"}}, "endpoint_uuid": {"uuid": "eth-rt3-2"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT4/RT5"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT4"}}, "endpoint_uuid": {"uuid": "eth-rt5"}},
+                {"device_id": {"device_uuid": {"uuid": "RT5"}}, "endpoint_uuid": {"uuid": "eth-rt4"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT4/RT6"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT4"}}, "endpoint_uuid": {"uuid": "eth-rt6"}},
+                {"device_id": {"device_uuid": {"uuid": "RT6"}}, "endpoint_uuid": {"uuid": "eth-rt4"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "RT5/RT6"}}, "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "RT5"}}, "endpoint_uuid": {"uuid": "eth-rt6"}},
+                {"device_id": {"device_uuid": {"uuid": "RT6"}}, "endpoint_uuid": {"uuid": "eth-rt5"}}
+            ]
+        }
+    ]
+}
diff --git a/src/te/tutorial/1-6-setup-erlang-environmnet.md b/src/te/tutorial/1-6-setup-erlang-environmnet.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d1519a6f3dc0e4bb686bfa107c0ca5b16eb6cd2
--- /dev/null
+++ b/src/te/tutorial/1-6-setup-erlang-environmnet.md
@@ -0,0 +1,56 @@
+# 1.6. Setup Erlang Environment
+
+First we need to install Erlang. There are multiple ways to do this; for
+development we will use *ASDF*, a tool that allows installing multiple versions
+of Erlang at the same time and switching from one version to the other at will.
+
+
+## 1.6.1. Setup Erlang using asdf
+
+First, install any missing dependencies:
+
+    sudo apt install curl git autoconf libncurses-dev build-essential m4 libssl-dev 
+
+Download the *ASDF* tool to the local account:
+
+    git clone https://github.com/asdf-vm/asdf.git ~/.asdf --branch v0.10.2
+
+Make *ASDF* activate on login by adding these lines at the end of the `~/.bashrc` file:
+
+    . $HOME/.asdf/asdf.sh
+    . $HOME/.asdf/completions/asdf.bash
+
+Log out and log back in to activate *ASDF*.
+
+*ASDF* supports multiple tools by installing their corresponding plugins.
+Install the *ASDF* plugin for Erlang:
+
+    asdf plugin add erlang https://github.com/asdf-vm/asdf-erlang.git
+
+Install a version of Erlang:
+
+    asdf install erlang 24.3.4.2
+
+Activate Erlang locally for the TFS controller. This will create a local file
+called `.tool-versions` defining which versions of the tools to use when
+running under the current directory:
+
+    cd tfs-ctrl/
+    asdf local erlang 24.3.4.2
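+
+After running the command, the `.tool-versions` file should contain a single
+line pinning the Erlang version:
+
+    erlang 24.3.4.2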
+
+Erlang projects use a build tool called rebar3. It is used to manage project
+dependencies, compile projects, and generate releases.
+Install rebar3 locally from source:
+
+    cd ~
+    git clone https://github.com/erlang/rebar3.git
+    cd rebar3
+    asdf local erlang 24.3.4.2
+    ./bootstrap
+    ./rebar3 local install
+
+Update `~/.bashrc` to use rebar3 by adding this line at the end:
+
+    export PATH=$HOME/.cache/rebar3/bin:$PATH
+
+Log out and log back in.
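+
+To verify that the environment is active, you can check the selected versions
+(a quick sanity check; the exact output depends on your installation):
+
+    asdf current erlang
+    rebar3 version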
diff --git a/src/te/tutorial/2-6-te-demo.md b/src/te/tutorial/2-6-te-demo.md
new file mode 100644
index 0000000000000000000000000000000000000000..c53a60c51cf20717e51fb514c5b02d6594ce7904
--- /dev/null
+++ b/src/te/tutorial/2-6-te-demo.md
@@ -0,0 +1,130 @@
+# 2.6. Traffic Engineering Demo
+
+## Setup Test-Bed
+
+### Setup libyang
+
+    $ sudo apt update
+    $ sudo apt-get install cmake libpcre2-dev git make build-essential
+    $ mkdir -p ~/testbed
+    $ cd ~/testbed
+    $ git clone git@github.com:CESNET/libyang.git
+    $ cd libyang
+    $ git checkout v2.0.0
+    $ mkdir build; cd build
+    $ cmake -D CMAKE_INSTALL_PREFIX:PATH=/usr -D CMAKE_BUILD_TYPE:String="Release" ..
+    $ make
+    $ sudo make install
+
+
+### Setup Free Range Routing
+
+    $ sudo apt update
+    $ sudo apt-get install git autoconf automake libtool make libreadline-dev texinfo pkg-config libpam0g-dev libjson-c-dev bison flex libc-ares-dev python3-dev python3-sphinx install-info build-essential libsnmp-dev perl libcap-dev python2 libelf-dev libunwind-dev protobuf-c-compiler libprotobuf-c-dev libsystemd-dev
+    $ mkdir -p ~/testbed
+    $ cd ~/testbed
+    $ git clone git@github.com:opensourcerouting/frr.git
+    $ cd frr
+    $ curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py
+    $ sudo python2 ./get-pip.py
+    $ export CFLAGS="-I /usr/local/include -g -O2"
+    $ sudo rm -rf /usr/lib/frr
+    $ sudo rm -rf /var/run/frr
+    $ sudo mkdir -p /etc/frr
+    $ sudo mkdir -p /var/run/frr
+    $ sudo chown -R root:root /etc/frr
+    $ ./bootstrap.sh
+    $ ./configure \
+        --prefix=/usr \
+        --includedir=\${prefix}/include \
+        --enable-exampledir=\${prefix}/share/doc/frr/examples \
+        --bindir=\${prefix}/bin \
+        --sbindir=\${prefix}/lib/frr \
+        --libdir=\${prefix}/lib/frr \
+        --libexecdir=\${prefix}/lib/frr \
+        --localstatedir=/var/run/frr \
+        --sysconfdir=/etc/frr \
+        --with-moduledir=\${prefix}/lib/frr/modules \
+        --enable-configfile-mask=0640 \
+        --enable-logfile-mask=0640 \
+        --enable-snmp=agentx \
+        --enable-multipath=64 \
+        --enable-user=root \
+        --enable-group=root \
+        --enable-vty-group=root \
+        --enable-vtysh \
+        --with-pkg-git-version \
+        --with-pkg-extra-version=-MyOwnFRRVersion \
+        --enable-systemd=yes \
+        --enable-config-rollbacks \
+        --enable-pathd \
+        --enable-pcep
+    $ make
+    $ sudo make install
+
+
+### Setup NetGen
+
+    $ sudo apt update
+    $ sudo apt-get install git ruby ruby-dev tmux gettext-base
+    $ mkdir -p ~/testbed
+    $ cd ~/testbed
+    $ git clone git@github.com:sylane/netgen.git
+    $ cd netgen
+    $ git checkout teraflow
+    $ sudo gem install bundler:1.15
+    $ bundle _1.15_ install
+
+
+### Run the Test-Bed
+
+First, load the [teraflow configuration file](../tests/topology-descriptors.json) using the webui.
+
+In the first console:
+
+    $ cd ~/testbed
+    $ ../tfs-ctrl/src/te/tests/start-testbed.sh
+
+Then, in a second console:
+
+    $ sudo -i
+    # cd /tmp/netgen
+    # ./tmux.sh
+
+Be sure that both PCCs have connected to the TE service before going further.
+This can be checked by looking at the TE service log:
+
+    $ kubectl --namespace tfs logs $(kubectl --namespace tfs get pods --selector=app=teservice -o name) -c server
+
+### Setup a flow from the Erlang console
+
+We will set up two unidirectional flows between routers 1 and 6.
+We will use binding label 1111 for the flow from router 1 to router 6, and binding label 6666 for the flow from router 6 to router 1.
+
+    $ kubectl --namespace tfs exec -ti $(kubectl --namespace tfs get pods --selector=app=teservice -o name) -c server -- /tfte/bin/tfte remote_console
+    1> {ok, Flow1to6} = epce_server:initiate_flow(<<"foo">>, {1, 1, 1, 1}, {6, 6, 6, 6}, 1111).
+    2> {ok, Flow6to1} = epce_server:initiate_flow(<<"bar">>, {6, 6, 6, 6}, {1, 1, 1, 1}, 6666).
+
+Another option is to use the router names:
+
+    1> {ok, Flow1to6} = epce_server:initiate_flow(<<"foo">>, <<"RT1">>, <<"RT6">>, 1111).
+    2> {ok, Flow6to1} = epce_server:initiate_flow(<<"bar">>, <<"RT6">>, <<"RT1">>, 6666).
+
+Now, if we go to the tmux session `src` (Ctrl-B 0), we can ping `dst`:
+
+    $ ping 9.9.9.2
+
+From the Erlang console we can update the initiated flows to change the path
+the packets flow through. The argument is the label stack of the new path,
+using the MPLS labels defined in the topology descriptors (here 16050, 16030
+and 16010, i.e. RT5, RT3 and RT1):
+
+    3> epce_server:update_flow(Flow6to1, [16050, 16030, 16010]).
+
+### Setup a flow using the gRPC test script
+
+This does the same as the setup from the Erlang console, but through gRPC.
+After deploying Teraflow (with the te component), get the TE service IP using:
+
+    $ kubectl -n tfs get services
+
+Replace the IP in the Python script `src/te/tests/test_te_service.py`.
+Be sure the topology has been loaded and netgen has been started as described
+previously, then run the following command from the root of the teraflow controller:
+
+    $ PYTHONPATH=./src python src/te/tests/test_te_service.py
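+
+For reference, the connection is created at the top of `main()` in that script;
+only the host needs to be changed to the TE service IP:
+
+    te_service_client = TEServiceClient(host='XXX.XXX.XXX.XXX', port=10030)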
diff --git a/src/tests/ofc22/descriptors_emulated.json b/src/tests/ofc22/descriptors_emulated.json
index b68b9636d58d9c80c4774e4ade557f83796ac5b5..1e16b71169d721fe1de0befe212e187861ddf2de 100644
--- a/src/tests/ofc22/descriptors_emulated.json
+++ b/src/tests/ofc22/descriptors_emulated.json
@@ -15,7 +15,9 @@
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
                     {"uuid": "13/0/0", "type": "optical", "sample_types": []},
                     {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
-                ]}}}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/0/0]/settings", "resource_value": {"name": "13/0/0", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/1/2]/settings", "resource_value": {"name": "13/1/2", "enabled": true}}}
             ]}
         },
         {
@@ -27,7 +29,9 @@
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
                     {"uuid": "13/0/0", "type": "optical", "sample_types": []},
                     {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
-                ]}}}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/0/0]/settings", "resource_value": {"name": "13/0/0", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/1/2]/settings", "resource_value": {"name": "13/1/2", "enabled": true}}}
             ]}
         },
         {
@@ -39,7 +43,9 @@
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
                     {"uuid": "13/0/0", "type": "optical", "sample_types": []},
                     {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
-                ]}}}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/0/0]/settings", "resource_value": {"name": "13/0/0", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/1/2]/settings", "resource_value": {"name": "13/1/2", "enabled": true}}}
             ]}
         },
         {
@@ -51,7 +57,9 @@
                 {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [
                     {"uuid": "13/0/0", "type": "optical", "sample_types": []},
                     {"uuid": "13/1/2", "type": "copper",  "sample_types": [101, 102, 201, 202]}
-                ]}}}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/0/0]/settings", "resource_value": {"name": "13/0/0", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[13/1/2]/settings", "resource_value": {"name": "13/1/2", "enabled": true}}}
             ]}
         },
         {
@@ -65,7 +73,11 @@
                     {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical", "sample_types": []},
                     {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical", "sample_types": []},
                     {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical", "sample_types": []}
-                ]}}}
+                ]}}},
+                {"action": 1, "custom": {"resource_key": "/interface[aade6001-f00b-5e2f-a357-6a0a9d3de870]/settings", "resource_value": {"name": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[eb287d83-f05e-53ec-ab5a-adf6bd2b5418]/settings", "resource_value": {"name": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[0ef74f99-1acc-57bd-ab9d-4b958b06c513]/settings", "resource_value": {"name": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "enabled": true}}},
+                {"action": 1, "custom": {"resource_key": "/interface[50296d99-58cc-5ce7-82f5-fc8ee4eec2ec]/settings", "resource_value": {"name": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "enabled": true}}}
             ]}
         }
     ],
diff --git a/src/tests/scenario3/l3/deploy_specs.sh b/src/tests/scenario3/l3/deploy_specs.sh
index c3c9122b8594908c9d9f7d9a56daa4f8d0d8cf52..8c8264fca75d471c3bbbf0cb523c7a17bcffa5a0 100644
--- a/src/tests/scenario3/l3/deploy_specs.sh
+++ b/src/tests/scenario3/l3/deploy_specs.sh
@@ -20,7 +20,23 @@
 export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/"
 
 # Set the list of components, separated by spaces, you want to build images for, and deploy.
-export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator monitoring automation l3_attackmitigator l3_centralizedattackdetector"
+#export TFS_COMPONENTS="context device pathcomp service slice compute webui load_generator"
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Uncomment to activate Monitoring
+export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring"
+
+# Uncomment to activate Automation and Policy Manager
+#export TFS_COMPONENTS="${TFS_COMPONENTS} automation policy"
+
+# Uncomment to activate Optical CyberSecurity
+#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager"
+
+# Uncomment to activate L3 CyberSecurity
+export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector"
+
+# Uncomment to activate TE
+#export TFS_COMPONENTS="${TFS_COMPONENTS} te"
 
 # Set the tag you want to use for your images.
 export TFS_IMAGE_TAG="dev"
@@ -29,7 +45,13 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml manifests/servicemonitors.yaml"
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
 
 # Set the new Grafana admin password
 export TFS_GRAFANA_PASSWORD="admin123+"
@@ -66,7 +88,7 @@ export CRDB_DEPLOY_MODE="single"
 export CRDB_DROP_DATABASE_IF_EXISTS="YES"
 
 # Disable flag for re-deploying CockroachDB from scratch.
-export CRDB_REDEPLOY="YES"
+export CRDB_REDEPLOY=""
 
 
 # ----- NATS -------------------------------------------------------------------
@@ -81,7 +103,7 @@ export NATS_EXT_PORT_CLIENT="4222"
 export NATS_EXT_PORT_HTTP="8222"
 
 # Disable flag for re-deploying NATS from scratch.
-export NATS_REDEPLOY="YES"
+export NATS_REDEPLOY=""
 
 
 # ----- QuestDB ----------------------------------------------------------------
@@ -114,7 +136,7 @@ export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
 export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
-export QDB_REDEPLOY="YES"
+export QDB_REDEPLOY=""
 
 
 # ----- K8s Observability ------------------------------------------------------