diff --git a/my_deploy.sh b/my_deploy.sh
index a048edb30b66791d5405961b41faf2443f9d51e1..178c8e6fd1445aa062d1920f82e8024bb037e102 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -134,7 +134,7 @@ export CRDB_PASSWORD="tfs123"
 export CRDB_DEPLOY_MODE="single"
 
 # Disable flag for dropping database, if it exists.
-export CRDB_DROP_DATABASE_IF_EXISTS=""
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
 
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
@@ -186,7 +186,7 @@ export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
 export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
 
 # Disable flag for dropping tables if they exist.
-export QDB_DROP_TABLES_IF_EXIST=""
+export QDB_DROP_TABLES_IF_EXIST="YES"
 
 # Disable flag for re-deploying QuestDB from scratch.
 export QDB_REDEPLOY=""
diff --git a/proto/context.proto b/proto/context.proto
index 01e096233e364be8ad4e3810e7619e8f522e66e6..3e1c60ccbee5443d3cf8f91d812ad1ff3966908a 100644
--- a/proto/context.proto
+++ b/proto/context.proto
@@ -17,6 +17,7 @@ package context;
 
 import "acl.proto";
 import "kpi_sample_types.proto";
+import "pon_access.proto";
 
 service ContextService {
   rpc ListContextIds     (Empty         ) returns (       ContextIdList   ) {}
@@ -226,6 +227,7 @@ enum DeviceDriverEnum {
   DEVICEDRIVER_IETF_L3VPN = 13;
   DEVICEDRIVER_IETF_SLICE = 14;
   DEVICEDRIVER_NCE = 15;
+  DEVICEDRIVER_PON = 16;
 }
 
 enum DeviceOperationalStatusEnum {
@@ -321,6 +323,7 @@ enum ServiceTypeEnum {
   SERVICETYPE_E2E = 5;
   SERVICETYPE_OPTICAL_CONNECTIVITY = 6;
   SERVICETYPE_QKD = 7;
+  SERVICETYPE_PON_ACCESS = 8;
 }
 
 enum ServiceStatusEnum {
@@ -533,11 +536,17 @@ message ConfigRule_ACL {
   acl.AclRuleSet rule_set = 2;
 }
 
+message ConfigRule_PON {
+  EndPointId endpoint_id = 1;
+  pon_access.PonRuleSet rule_set = 2;
+}
+
 message ConfigRule {
   ConfigActionEnum action = 1;
   oneof config_rule {
-    ConfigRule_Custom custom = 2;
-    ConfigRule_ACL acl = 3;
+    ConfigRule_Custom custom  = 2;
+    ConfigRule_ACL acl        = 3;
+    ConfigRule_PON pon_access = 5;
   }
 }
 
diff --git a/proto/pon_access.proto b/proto/pon_access.proto
new file mode 100644
index 0000000000000000000000000000000000000000..b3cbb02bd72e54a2475b488a96426001dd89be9f
--- /dev/null
+++ b/proto/pon_access.proto
@@ -0,0 +1,29 @@
+// Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package pon_access;
+
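+// Ruleset carried by ConfigRule_PON (see context.proto) to provision a subscriber service on the PON access segment.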
+message PonRuleSet {
+  string m = 1;                // ncclient session
+  string ont_id = 2;           // ID of the ONT to operate on
+  string cvlan = 3;            // ID of the Customer VLAN (if required)
+  string ethernet_port = 4;    // Ethernet port of the OLT (e.g., 1/1/x1 or similar)
+  string svlan = 5;            // Service VLAN to configure
+  string profile = 6;          // "be" (Best Effort) or "ef" (Expedited Forwarding)
+  string bw = 7;               // Optional bandwidth to set (required if profile == "ef")
+  string mask = 8;             // Mask (from original message)
+  string vlan = 9;             // VLAN (from original message)
+}
diff --git a/quick_deploy.sh b/quick_deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..585fcfbc7534107e9a96450135dd592ca8737d66
--- /dev/null
+++ b/quick_deploy.sh
@@ -0,0 +1,439 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
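+# quick_deploy.sh: re-deploys TeraFlowSDN reusing previously built Docker images; when
+# TFS_SKIP_BUILD is "YES", only the components listed in TFS_QUICK_COMPONENTS are rebuilt.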
+
+########################################################################################################################
+# Read deployment settings
+########################################################################################################################
+
+
+# ----- TeraFlowSDN ------------------------------------------------------------
+
+# If not already set, set the URL of the Docker registry where the images will be uploaded to.
+# By default, assume internal MicroK8s registry is used.
+export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"}
+
+# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy.
+# By default, only basic components are deployed.
+export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui"}
+
+# If not already set, set the tag you want to use for your images.
+export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"}
+
+# If not already set, set the name of the Kubernetes namespace to deploy TFS to.
+export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
+
+# If not already set, set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS=${TFS_EXTRA_MANIFESTS:-""}
+
+# If not already set, set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD=${TFS_GRAFANA_PASSWORD:-"admin123+"}
+
+# If not already set, enable the skip-build flag to reuse existing Docker images.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt, retagged, nor repushed, and existing images are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-"YES"}
+
+# If TFS_SKIP_BUILD is "YES", select the containers to be build
+# Any other container will use previous docker images
+export TFS_QUICK_COMPONENTS="context"
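+# Example: export TFS_QUICK_COMPONENTS="context device" rebuilds only the Context and Device images.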
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# If not already set, set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE=${CRDB_NAMESPACE:-"crdb"}
+
+# If not already set, set the database username to be used by Context.
+export CRDB_USERNAME=${CRDB_USERNAME:-"tfs"}
+
+# If not already set, set the database user's password to be used by Context.
+export CRDB_PASSWORD=${CRDB_PASSWORD:-"tfs123"}
+
+# If not already set, set the database name to be used by Context.
+export CRDB_DATABASE=${CRDB_DATABASE:-"tfs"}
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# If not already set, set the namespace where NATS will be deployed.
+export NATS_NAMESPACE=${NATS_NAMESPACE:-"nats"}
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# If not already set, set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE=${QDB_NAMESPACE:-"qdb"}
+
+# If not already set, set the database username to be used for QuestDB.
+export QDB_USERNAME=${QDB_USERNAME:-"admin"}
+
+# If not already set, set the database user's password to be used for QuestDB.
+export QDB_PASSWORD=${QDB_PASSWORD:-"quest"}
+
+# If not already set, set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS:-"tfs_monitoring_kpis"}
+
+# If not already set, set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS:-"tfs_slice_groups"}
+
+
+########################################################################################################################
+# Automated steps start here
+########################################################################################################################
+
+# Constants
+GITLAB_REPO_URL="labs.etsi.org:5050/tfs/controller"
+TMP_FOLDER="./tmp"
+
+# Create a tmp folder for files modified during the deployment
+TMP_MANIFESTS_FOLDER="$TMP_FOLDER/manifests"
+mkdir -p $TMP_MANIFESTS_FOLDER
+TMP_LOGS_FOLDER="$TMP_FOLDER/logs"
+mkdir -p $TMP_LOGS_FOLDER
+
+echo "Deleting and Creating a new namespace..."
+kubectl delete namespace $TFS_K8S_NAMESPACE --ignore-not-found
+kubectl create namespace $TFS_K8S_NAMESPACE
+printf "\n"
+
+echo "Create secret with CockroachDB data"
+CRDB_SQL_PORT=$(kubectl --namespace ${CRDB_NAMESPACE} get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+kubectl create secret generic crdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=CRDB_NAMESPACE=${CRDB_NAMESPACE} \
+    --from-literal=CRDB_SQL_PORT=${CRDB_SQL_PORT} \
+    --from-literal=CRDB_DATABASE=${CRDB_DATABASE} \
+    --from-literal=CRDB_USERNAME=${CRDB_USERNAME} \
+    --from-literal=CRDB_PASSWORD=${CRDB_PASSWORD} \
+    --from-literal=CRDB_SSLMODE=require
+printf "\n"
+
+echo "Create secret with NATS data"
+NATS_CLIENT_PORT=$(kubectl --namespace ${NATS_NAMESPACE} get service nats -o 'jsonpath={.spec.ports[?(@.name=="client")].port}')
+kubectl create secret generic nats-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=NATS_NAMESPACE=${NATS_NAMESPACE} \
+    --from-literal=NATS_CLIENT_PORT=${NATS_CLIENT_PORT}
+printf "\n"
+
+echo "Create secret with QuestDB data"
+QDB_HTTP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+QDB_ILP_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="ilp")].port}')
+QDB_SQL_PORT=$(kubectl --namespace ${QDB_NAMESPACE} get service questdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+METRICSDB_HOSTNAME="questdb-public.${QDB_NAMESPACE}.svc.cluster.local"
+kubectl create secret generic qdb-data --namespace ${TFS_K8S_NAMESPACE} --type='Opaque' \
+    --from-literal=QDB_NAMESPACE=${QDB_NAMESPACE} \
+    --from-literal=METRICSDB_HOSTNAME=${METRICSDB_HOSTNAME} \
+    --from-literal=METRICSDB_REST_PORT=${QDB_HTTP_PORT} \
+    --from-literal=METRICSDB_ILP_PORT=${QDB_ILP_PORT} \
+    --from-literal=METRICSDB_SQL_PORT=${QDB_SQL_PORT} \
+    --from-literal=METRICSDB_TABLE_MONITORING_KPIS=${QDB_TABLE_MONITORING_KPIS} \
+    --from-literal=METRICSDB_TABLE_SLICE_GROUPS=${QDB_TABLE_SLICE_GROUPS} \
+    --from-literal=METRICSDB_USERNAME=${QDB_USERNAME} \
+    --from-literal=METRICSDB_PASSWORD=${QDB_PASSWORD}
+printf "\n"
+
+echo "Deploying components and collecting environment variables..."
+ENV_VARS_SCRIPT=tfs_runtime_env_vars.sh
+echo "# Environment variables for TeraFlowSDN deployment" > $ENV_VARS_SCRIPT
+PYTHONPATH=$(pwd)/src
+echo "export PYTHONPATH=${PYTHONPATH}" >> $ENV_VARS_SCRIPT
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Processing '$COMPONENT' component..."
+
+    if [ "$TFS_SKIP_BUILD" != "YES" ]; then
+        echo "  Building Docker image..."
+        BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log"
+
+        if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then
+            docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG"
+        elif [ "$COMPONENT" == "pathcomp" ]; then
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log"
+            docker build -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG"
+
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log"
+            docker build -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . > "$BUILD_LOG"
+            # next command is redundant, but helpful to keep cache updated between rebuilds
+            IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder"
+            docker build -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log"
+            docker build -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG"
+
+            BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log"
+            docker build -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG"
+        else
+            docker build -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG"
+        fi
+
+        echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
+
+        if [ "$COMPONENT" == "pathcomp" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log"
+            docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log"
+            docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        elif [ "$COMPONENT" == "dlt" ]; then
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log"
+            docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log"
+            docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        else
+            IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+            TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log"
+            docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+            PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log"
+            docker push "$IMAGE_URL" > "$PUSH_LOG"
+        fi
+    else
+        for QUICK_COMPONENT in $TFS_QUICK_COMPONENTS; do
+            if [ "$COMPONENT" == "$QUICK_COMPONENT" ]; then
+
+                echo "  Building Docker image..."
+                BUILD_LOG="$TMP_LOGS_FOLDER/build_${QUICK_COMPONENT}.log"
+
+                docker build -t "$QUICK_COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$QUICK_COMPONENT"/Dockerfile . > "$BUILD_LOG"
+                echo "  Pushing Docker image to '$TFS_REGISTRY_IMAGES'..."
+
+                IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$QUICK_COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+
+                TAG_LOG="$TMP_LOGS_FOLDER/tag_${QUICK_COMPONENT}.log"
+                docker tag "$QUICK_COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG"
+
+                PUSH_LOG="$TMP_LOGS_FOLDER/push_${QUICK_COMPONENT}.log"
+                docker push "$IMAGE_URL" > "$PUSH_LOG"
+            fi
+        done
+    fi
+
+    echo "  Adapting '$COMPONENT' manifest file..."
+    MANIFEST="$TMP_MANIFESTS_FOLDER/${COMPONENT}service.yaml"
+    cp ./manifests/"${COMPONENT}"service.yaml "$MANIFEST"
+
+    if [ "$COMPONENT" == "pathcomp" ]; then
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-frontend:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-frontend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-backend:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-backend:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    elif [ "$COMPONENT" == "dlt" ]; then
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-connector:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-connector:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}-gateway:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT-gateway:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    else
+        IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g')
+        VERSION=$(grep -i "${GITLAB_REPO_URL}/${COMPONENT}:" "$MANIFEST" | cut -d ":" -f4)
+        sed -E -i "s#image: $GITLAB_REPO_URL/$COMPONENT:${VERSION}#image: $IMAGE_URL#g" "$MANIFEST"
+    fi
+
+    sed -E -i "s#imagePullPolicy: .*#imagePullPolicy: Always#g" "$MANIFEST"
+
+    # TODO: harmonize names of the monitoring component
+
+    echo "  Deploying '$COMPONENT' component to Kubernetes..."
+    DEPLOY_LOG="$TMP_LOGS_FOLDER/deploy_${COMPONENT}.log"
+    kubectl --namespace $TFS_K8S_NAMESPACE apply -f "$MANIFEST" > "$DEPLOY_LOG"
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=0 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+    #kubectl --namespace $TFS_K8S_NAMESPACE scale deployment --replicas=1 ${COMPONENT_OBJNAME}service >> "$DEPLOY_LOG"
+
+    echo "  Collecting env-vars for '$COMPONENT' component..."
+
+    SERVICE_DATA=$(kubectl get service ${COMPONENT_OBJNAME}service --namespace $TFS_K8S_NAMESPACE -o json)
+    if [ -z "${SERVICE_DATA}" ]; then continue; fi
+
+    # Env vars for service's host address
+    SERVICE_HOST=$(echo ${SERVICE_DATA} | jq -r '.spec.clusterIP')
+    if [ -z "${SERVICE_HOST}" ]; then continue; fi
+    ENVVAR_HOST=$(echo "${COMPONENT}service_SERVICE_HOST" | tr '[:lower:]' '[:upper:]')
+    echo "export ${ENVVAR_HOST}=${SERVICE_HOST}" >> $ENV_VARS_SCRIPT
+
+    # Env vars for service's 'grpc' port (if any)
+    SERVICE_PORT_GRPC=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="grpc") | .port')
+    if [ -n "${SERVICE_PORT_GRPC}" ]; then
+        ENVVAR_PORT_GRPC=$(echo "${COMPONENT}service_SERVICE_PORT_GRPC" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_GRPC}=${SERVICE_PORT_GRPC}" >> $ENV_VARS_SCRIPT
+    fi
+
+    # Env vars for service's 'http' port (if any)
+    SERVICE_PORT_HTTP=$(echo ${SERVICE_DATA} | jq -r '.spec.ports[] | select(.name=="http") | .port')
+    if [ -n "${SERVICE_PORT_HTTP}" ]; then
+        ENVVAR_PORT_HTTP=$(echo "${COMPONENT}service_SERVICE_PORT_HTTP" | tr '[:lower:]' '[:upper:]')
+        echo "export ${ENVVAR_PORT_HTTP}=${SERVICE_PORT_HTTP}" >> $ENV_VARS_SCRIPT
+    fi
+
+    printf "\n"
+done
+
+echo "Deploying extra manifests..."
+for EXTRA_MANIFEST in $TFS_EXTRA_MANIFESTS; do
+    echo "Processing manifest '$EXTRA_MANIFEST'..."
+    if [[ "$EXTRA_MANIFEST" == *"servicemonitor"* ]]; then
+        kubectl apply -f $EXTRA_MANIFEST
+    else
+        kubectl --namespace $TFS_K8S_NAMESPACE apply -f $EXTRA_MANIFEST
+    fi
+    printf "\n"
+done
+printf "\n"
+
+for COMPONENT in $TFS_COMPONENTS; do
+    echo "Waiting for '$COMPONENT' component..."
+    COMPONENT_OBJNAME=$(echo "${COMPONENT}" | sed "s/\_/-/")
+    kubectl wait --namespace $TFS_K8S_NAMESPACE \
+        --for='condition=available' --timeout=300s deployment/${COMPONENT_OBJNAME}service
+    printf "\n"
+done
+
+if [[ "$TFS_COMPONENTS" == *"webui"* ]] && [[ "$TFS_COMPONENTS" == *"monitoring"* ]]; then
+    echo "Configuring WebUI DataStores and Dashboards..."
+    sleep 5
+
+    # Exposed through the ingress controller "tfs-ingress"
+    GRAFANA_URL="127.0.0.1:80/grafana"
+
+    # Default Grafana credentials
+    GRAFANA_USERNAME="admin"
+    GRAFANA_PASSWORD="admin"
+
+    # Configure Grafana Admin Password
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/user/#change-password
+    GRAFANA_URL_DEFAULT="http://${GRAFANA_USERNAME}:${GRAFANA_PASSWORD}@${GRAFANA_URL}"
+
+    echo ">> Updating Grafana 'admin' password..."
+    curl -X PUT -H "Content-Type: application/json" -d '{
+        "oldPassword": "'${GRAFANA_PASSWORD}'",
+        "newPassword": "'${TFS_GRAFANA_PASSWORD}'",
+        "confirmNew" : "'${TFS_GRAFANA_PASSWORD}'"
+    }' ${GRAFANA_URL_DEFAULT}/api/user/password
+    echo
+    echo
+
+    # Updated Grafana API URL
+    GRAFANA_URL_UPDATED="http://${GRAFANA_USERNAME}:${TFS_GRAFANA_PASSWORD}@${GRAFANA_URL}"
+    echo "export GRAFANA_URL_UPDATED=${GRAFANA_URL_UPDATED}" >> $ENV_VARS_SCRIPT
+
+    echo ">> Installing Scatter Plot plugin..."
+    curl -X POST -H "Content-Type: application/json" -H "Content-Length: 0" \
+        ${GRAFANA_URL_UPDATED}/api/plugins/michaeldmoore-scatter-panel/install
+    echo
+
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/data_source/
+    QDB_HOST_PORT="${METRICSDB_HOSTNAME}:${QDB_SQL_PORT}"
+    echo ">> Creating datasources..."
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-mon-kpi",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_MONITORING_KPIS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": true,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d '{
+        "access"   : "proxy",
+        "type"     : "postgres",
+        "name"     : "questdb-slc-grp",
+        "url"      : "'${QDB_HOST_PORT}'",
+        "database" : "'${QDB_TABLE_SLICE_GROUPS}'",
+        "user"     : "'${QDB_USERNAME}'",
+        "basicAuth": false,
+        "isDefault": false,
+        "jsonData" : {
+            "sslmode"               : "disable",
+            "postgresVersion"       : 1100,
+            "maxOpenConns"          : 0,
+            "maxIdleConns"          : 2,
+            "connMaxLifetime"       : 14400,
+            "tlsAuth"               : false,
+            "tlsAuthWithCACert"     : false,
+            "timescaledb"           : false,
+            "tlsConfigurationMethod": "file-path",
+            "tlsSkipVerify"         : true
+        },
+        "secureJsonData": {"password": "'${QDB_PASSWORD}'"}
+    }' ${GRAFANA_URL_UPDATED}/api/datasources
+    printf "\n\n"
+
+    echo ">> Creating dashboards..."
+    # Ref: https://grafana.com/docs/grafana/latest/http_api/dashboard/
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_mon_kpis_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    echo
+
+    curl -X POST -H "Content-Type: application/json" -d '@src/webui/grafana_db_slc_grps_psql.json' \
+        ${GRAFANA_URL_UPDATED}/api/dashboards/db
+    printf "\n\n"
+
+    echo ">> Staring dashboards..."
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-l3-monit"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    DASHBOARD_URL="${GRAFANA_URL_UPDATED}/api/dashboards/uid/tfs-slice-grps"
+    DASHBOARD_ID=$(curl -s "${DASHBOARD_URL}" | jq '.dashboard.id')
+    curl -X POST ${GRAFANA_URL_UPDATED}/api/user/stars/dashboard/${DASHBOARD_ID}
+    echo
+
+    printf "\n\n"
+fi
diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py
index 9a982d1eb71e8b139d2a86fe1a774154239c7147..7b2b2a51698230e8b7ffe6fd621b99d71092524f 100644
--- a/src/common/DeviceTypes.py
+++ b/src/common/DeviceTypes.py
@@ -50,6 +50,7 @@ class DeviceTypeEnum(Enum):
     XR_CONSTELLATION                = 'xr-constellation'
     QKD_NODE                        = 'qkd-node'
     OPEN_ROADM                      = 'openroadm'
+    PON_CONTROLLER                  = 'pon-controller'
 
     # ETSI TeraFlowSDN controller
     TERAFLOWSDN_CONTROLLER          = 'teraflowsdn'
diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py
index ab399adbe734adeb55f60c804aea0e4877072316..b4936abccfa66e3ceaf775b705279798fd914bf6 100644
--- a/src/common/tools/object_factory/Service.py
+++ b/src/common/tools/object_factory/Service.py
@@ -82,6 +82,16 @@ def json_service_tapi_planned(
         status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
         config_rules=config_rules)
 
+def json_service_iplink_planned(
+        service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
+        config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
+    ):
+
+    return json_service(
+        service_uuid, ServiceTypeEnum.SERVICETYPE_IPLINK, context_id=json_context_id(context_uuid),
+        status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints,
+        config_rules=config_rules)
+
 def json_service_p4_planned(
         service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
         config_rules : List[Dict] = [], context_uuid : str = DEFAULT_CONTEXT_NAME
diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py
index 70f5c4220252a3515eab017c1c332af99b082813..90d8e0cfd4c81612a9fd9d01ef63669faacaf5fe 100644
--- a/src/common/type_checkers/Assertions.py
+++ b/src/common/type_checkers/Assertions.py
@@ -50,6 +50,7 @@ def validate_device_driver_enum(message):
         'DEVICEDRIVER_IETF_ACTN',
         'DEVICEDRIVER_OC',
         'DEVICEDRIVER_QKD',
+        'DEVICEDRIVER_PON',
     ]
 
 def validate_device_operational_status_enum(message):
@@ -107,6 +108,7 @@ def validate_service_type_enum(message):
         'SERVICETYPE_E2E',
         'SERVICETYPE_OPTICAL_CONNECTIVITY',
         'SERVICETYPE_QKD',
+        'SERVICETYPE_PON_ACCESS',
     ]
 
 def validate_service_state_enum(message):
@@ -144,6 +146,7 @@ def validate_uuid(message, allow_empty=False):
 CONFIG_RULE_TYPES = {
     'custom',
     'acl',
+    'pon_access',
 }
 def validate_config_rule(message):
     assert isinstance(message, dict)
diff --git a/src/context/service/database/ConfigRule.py b/src/context/service/database/ConfigRule.py
index 4074eb9c01032612a272fdd80ebd6041c3f0a8ea..c57d4a4abcd986102d63a3c789ff279af750d935 100644
--- a/src/context/service/database/ConfigRule.py
+++ b/src/context/service/database/ConfigRule.py
@@ -71,6 +71,11 @@ def compose_config_rules_data(
             _, _, endpoint_uuid = endpoint_get_uuid(config_rule.acl.endpoint_id, allow_random=False)
             rule_set_name = config_rule.acl.rule_set.name
             configrule_name = '{:s}:{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid, rule_set_name)
+        elif kind == ConfigRuleKindEnum.PON_ACCESS:
+            _, _, endpoint_uuid = endpoint_get_uuid(config_rule.pon_access.endpoint_id, allow_random=False)
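+            # PonRuleSet defines no name field (see proto/pon_access.proto); use a fixed ruleset name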
+            rule_set_name = "pon_access_service"
+            configrule_name = '{:s}:{:s}:{:s}:{:s}'.format(parent_kind, kind.value, endpoint_uuid, rule_set_name)
         else:
             MSG = 'Name for ConfigRule({:s}) cannot be inferred '+\
                   '(device_uuid={:s}, service_uuid={:s}, slice_uuid={:s})'
diff --git a/src/context/service/database/Service.py b/src/context/service/database/Service.py
index 62f07e4fbe4f90af7834358cc79d4c8cb82934f4..c4235baa8dceea84ab8e6bf90a931534502770b4 100644
--- a/src/context/service/database/Service.py
+++ b/src/context/service/database/Service.py
@@ -88,6 +88,9 @@ def service_set(db_engine : Engine, messagebroker : MessageBroker, request : Ser
     service_type = grpc_to_enum__service_type(request.service_type)
     if service_type is None and request.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY:
         service_type = "OPTICAL_CONNECTIVITY"
+
+    if service_type is None and request.service_type == ServiceTypeEnum.SERVICETYPE_PON_ACCESS:
+        service_type = "PON_ACCESS"
 
     service_status = grpc_to_enum__service_status(request.service_status.service_status)
 
diff --git a/src/context/service/database/models/ConfigRuleModel.py b/src/context/service/database/models/ConfigRuleModel.py
index 029aa6867349e10eb56546ce65d2541065ec6a6f..03e882d98f706ef5efde44eeac10ac1cf3aa8cf2 100644
--- a/src/context/service/database/models/ConfigRuleModel.py
+++ b/src/context/service/database/models/ConfigRuleModel.py
@@ -23,6 +23,7 @@ from ._Base import _Base
 class ConfigRuleKindEnum(enum.Enum):
     CUSTOM = 'custom'
     ACL    = 'acl'
+    PON_ACCESS = 'pon_access'
 
 class DeviceConfigRuleModel(_Base):
     __tablename__ = 'device_configrule'
diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py
index fe0d83fb1886a42526b1c71304b7e3ecc2b0b7d7..27293141bf00f2c42ee6d946e44a558d17c25cf7 100644
--- a/src/context/service/database/models/enums/DeviceDriver.py
+++ b/src/context/service/database/models/enums/DeviceDriver.py
@@ -38,6 +38,7 @@ class ORM_DeviceDriverEnum(enum.Enum):
     IETF_SLICE            = DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE
     OC                    = DeviceDriverEnum.DEVICEDRIVER_OC
     QKD                   = DeviceDriverEnum.DEVICEDRIVER_QKD
+    PON                   = DeviceDriverEnum.DEVICEDRIVER_PON
 
 grpc_to_enum__device_driver = functools.partial(
     grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum)
diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py
index 45f849a2643a328284e200f1718b02191fab9563..de96b7c7a86d18c69884eb23cc7d9a7faff5fac1 100644
--- a/src/context/service/database/models/enums/ServiceType.py
+++ b/src/context/service/database/models/enums/ServiceType.py
@@ -30,6 +30,7 @@ class ORM_ServiceTypeEnum(enum.Enum):
     E2E                       = ServiceTypeEnum.SERVICETYPE_E2E
     OPTICAL_CONNECTIVITY      = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY
     QKD                       = ServiceTypeEnum.SERVICETYPE_QKD
+    PON_ACCESS                = ServiceTypeEnum.SERVICETYPE_PON_ACCESS
 
 grpc_to_enum__service_type = functools.partial(
     grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum)
diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py
index ee683853676b34f256ff335fffe3600823e2c070..9164d050a334d4697d4ab2690a613b8a6d9fdd1d 100644
--- a/src/device/service/Tools.py
+++ b/src/device/service/Tools.py
@@ -313,6 +313,14 @@ def compute_rules_to_add_delete(
             ACL_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/acl_ruleset[{:s}]'
             key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, acl_ruleset_name)            
             context_config_rules[key_or_path] = grpc_message_to_json(config_rule.acl)    # get the resource value of the acl
+
+        elif config_rule_kind == 'pon_access':
+            device_uuid = config_rule.pon_access.endpoint_id.device_id.device_uuid.uuid # get the device name
+            endpoint_uuid = config_rule.pon_access.endpoint_id.endpoint_uuid.uuid       # get the endpoint name
+            pon_access_ruleset_name = 'pon_access_service'   # PonRuleSet defines no name field; use a fixed ruleset name
+            PON_ACCESS_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/pon_access_ruleset[{:s}]'
+            key_or_path = PON_ACCESS_KEY_TEMPLATE.format(device_uuid, endpoint_uuid, pon_access_ruleset_name)
+            context_config_rules[key_or_path] = grpc_message_to_json(config_rule.pon_access)    # get the resource value of the pon_access
  
     request_config_rules = []
     for config_rule in request.device_config.config_rules:
@@ -331,6 +339,16 @@ def compute_rules_to_add_delete(
                 config_rule.action, key_or_path, grpc_message_to_json(config_rule.acl)
             ))
 
+
+        elif config_rule_kind == 'pon_access':  # resource management of "pon_access" rule
+            device_uuid = config_rule.pon_access.endpoint_id.device_id.device_uuid.uuid
+            endpoint_uuid = config_rule.pon_access.endpoint_id.endpoint_uuid.uuid
+            PON_ACCESS_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/pon_access_ruleset'
+            key_or_path = PON_ACCESS_KEY_TEMPLATE.format(device_uuid, endpoint_uuid)
+            request_config_rules.append((
+                config_rule.action, key_or_path, grpc_message_to_json(config_rule.pon_access)
+            ))
+
     resources_to_set    : List[Tuple[str, Any]] = [] # key, value
     resources_to_delete : List[Tuple[str, Any]] = [] # key, value
 
diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py
index e3102cdf523a4e0b551873bb8f0c423db00aebf0..1d24749e2c780044c72f4616dda9d86e1be2e7be 100644
--- a/src/device/service/drivers/__init__.py
+++ b/src/device/service/drivers/__init__.py
@@ -217,3 +217,13 @@ if LOAD_ALL_DEVICE_DRIVERS:
                 FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_QKD,
             }
         ]))
+
+if LOAD_ALL_DEVICE_DRIVERS:
+    from .pon_driver.PON_Driver import PON_Driver # pylint: disable=wrong-import-position
+    DRIVERS.append(
+        (PON_Driver, [
+            {
+                FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PON_CONTROLLER,
+                FilterFieldEnum.DRIVER     : DeviceDriverEnum.DEVICEDRIVER_PON,
+            }
+        ]))
diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py
index e8ca15116d5de853a3730c20e89146c570df8d02..6a09ba57f92e4f9188a2c3e757cc3f9f2f48f736 100644
--- a/src/device/service/drivers/openconfig/templates/Interfaces.py
+++ b/src/device/service/drivers/openconfig/templates/Interfaces.py
@@ -39,11 +39,11 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]:
         elif xml_interface.find('oci:state/oci:type', namespaces=NAMESPACES) is not None:
             interface_type = xml_interface.find('oci:state/oci:type', namespaces=NAMESPACES)
         else: continue
-            
+
         interface_name = xml_interface.find('oci:name', namespaces=NAMESPACES)
         if interface_name is None or interface_name.text is None: continue
         add_value_from_tag(interface, 'name', interface_name)
-            
+
         # Get the type of interface according to the vendor's type
         if 'ianaift:' in interface_type.text:
             interface_type.text = interface_type.text.replace('ianaift:', '')                       #ADVA
diff --git a/src/device/service/drivers/openconfig/templates/Tools.py b/src/device/service/drivers/openconfig/templates/Tools.py
index 3adc908b3afb4b200fa10dc588bf09c9799820b2..b2a212cf7fbb3eb51364f144098d2931b18c1f9a 100644
--- a/src/device/service/drivers/openconfig/templates/Tools.py
+++ b/src/device/service/drivers/openconfig/templates/Tools.py
@@ -87,4 +87,4 @@ def generate_templates(resource_key: str, resource_value: str, delete: bool,vend
         if "acl_ruleset" in resource_key:                                               # acl rules management
             result_templates.extend(acl_mgmt(resource_value,vendor, delete))
 
-    return result_templates
\ No newline at end of file
+    return result_templates
diff --git a/src/device/service/drivers/pon_driver/Constants.py b/src/device/service/drivers/pon_driver/Constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d349152eb03c04222ae376a3aed21605bcc7708
--- /dev/null
+++ b/src/device/service/drivers/pon_driver/Constants.py
@@ -0,0 +1,21 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
+
+SPECIAL_RESOURCE_MAPPINGS = {
+    RESOURCE_ENDPOINTS        : '/endpoints',
+    RESOURCE_INTERFACES       : '/interfaces',
+    RESOURCE_NETWORK_INSTANCES: '/net-instances',
+}
diff --git a/src/device/service/drivers/pon_driver/PON_Driver.py b/src/device/service/drivers/pon_driver/PON_Driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb391ac124d7fd92deb0d1775427852a071a0967
--- /dev/null
+++ b/src/device/service/drivers/pon_driver/PON_Driver.py
@@ -0,0 +1,293 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import anytree, json, logging, pytz, queue, re, threading
+from datetime import datetime, timedelta
+from typing import Any, Iterator, List, Optional, Tuple, Union
+from apscheduler.executors.pool import ThreadPoolExecutor
+from apscheduler.job import Job
+from apscheduler.jobstores.memory import MemoryJobStore
+from apscheduler.schedulers.background import BackgroundScheduler
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.type_checkers.Checkers import chk_float, chk_length, chk_string, chk_type
+from device.service.driver_api._Driver import _Driver
+from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value
+from .Constants import SPECIAL_RESOURCE_MAPPINGS
+from .SyntheticSamplingParameters import SyntheticSamplingParameters, do_sampling
+from .Tools import compose_resource_endpoint
+from .connectionRequest import connectionRequest
+import requests
+
+
+LOGGER = logging.getLogger(__name__)
+
+RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r'^\/interface\[([^\]]+)\].*')
+
+DRIVER_NAME = 'PON_DRIVER'
+METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME})
+
+
+class PON_Driver(_Driver):
+    def __init__(self, address : str, port : int, **settings) -> None:
+        super().__init__(DRIVER_NAME, address, port, **settings)
+        self.__lock = threading.Lock()
+        self.__initial = TreeNode('.')
+        self.__running = TreeNode('.')
+        self.__subscriptions = TreeNode('.')
+
+        endpoints = self.settings.get('endpoints', [])
+        endpoint_resources = []
+        for endpoint in endpoints:
+            endpoint_resource = compose_resource_endpoint(endpoint)
+            if endpoint_resource is None: continue
+            endpoint_resources.append(endpoint_resource)
+        self.SetConfig(endpoint_resources)
+
+        self.__started = threading.Event()
+        self.__terminate = threading.Event()
+        self.__scheduler = BackgroundScheduler(daemon=True) # scheduler used to emulate sampling events
+        self.__scheduler.configure(
+            jobstores = {'default': MemoryJobStore()},
+            executors = {'default': ThreadPoolExecutor(max_workers=1)},
+            job_defaults = {'coalesce': False, 'max_instances': 3},
+            timezone=pytz.utc)
+        self.__out_samples = queue.Queue()
+        self.__synthetic_sampling_parameters = SyntheticSamplingParameters()
+
+    def Connect(self) -> bool:
+        # If started, assume it is already connected
+        if self.__started.is_set(): return True
+
+        # Connect triggers activation of sampling events that will be scheduled based on subscriptions
+        self.__scheduler.start()
+
+        # Indicate the driver is now connected to the device
+        self.__started.set()
+        return True
+
+    def Disconnect(self) -> bool:
+        # Trigger termination of loops and processes
+        self.__terminate.set()
+
+        # If not started, assume it is already disconnected
+        if not self.__started.is_set(): return True
+
+        # Disconnect triggers deactivation of sampling events
+        self.__scheduler.shutdown()
+        return True
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetInitialConfig(self) -> List[Tuple[str, Any]]:
+        with self.__lock:
+            return dump_subtree(self.__initial)
+
+    @metered_subclass_method(METRICS_POOL)
+    def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+        chk_type('resources', resource_keys, list)
+        with self.__lock:
+            if len(resource_keys) == 0: return dump_subtree(self.__running)
+            results = []
+            resolver = anytree.Resolver(pathattr='name')
+            for i,resource_key in enumerate(resource_keys):
+                str_resource_name = 'resource_key[#{:d}]'.format(i)
+                try:
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    resource_key = SPECIAL_RESOURCE_MAPPINGS.get(resource_key, resource_key)
+                    resource_path = resource_key.split('/')
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Exception validating {:s}: {:s}'.format(str_resource_name, str(resource_key)))
+                    results.append((resource_key, e)) # if validation fails, store the exception
+                    continue
+
+                resource_node = get_subnode(resolver, self.__running, resource_path, default=None)
+                # if not found, resource_node is None
+                if resource_node is None: continue
+                results.extend(dump_subtree(resource_node))
+            return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+        results = []
+        resolver = anytree.Resolver(pathattr='name')
+        with self.__lock:
+            for i,resource in enumerate(resources):
+                str_resource_name = 'resources[#{:d}]'.format(i)
+                try:
+                    chk_type(str_resource_name, resource, (list, tuple))
+                    chk_length(str_resource_name, resource, min_length=2, max_length=2)
+                    resource_key,resource_value = resource
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    resource_path = resource_key.split('/')
+
+
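+                    # NOTE: the connection parameters below are hardcoded placeholders; they
+                    # should be derived from the PonRuleSet fields carried in resource_value.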
+                    result = connectionRequest("2", "22", "2", "333", "ef", bw="2500000")
+                    LOGGER.debug('connectionRequest result: {:s}'.format(str(result)))
+
+                except Exception as e:
+                    LOGGER.exception('Error processing configuration {:s}: {:s}'.format(str(resource), str(e)))
+                    results.append(e)
+                    continue
+
+                try:
+                    resource_value = json.loads(resource_value)
+                except: # pylint: disable=bare-except
+                    pass
+
+                set_subnode_value(resolver, self.__running, resource_path, resource_value)
+
+                match = RE_GET_ENDPOINT_FROM_INTERFACE.match(resource_key)
+                if match is not None:
+                    endpoint_uuid = match.group(1)
+                    if '.' in endpoint_uuid: endpoint_uuid = endpoint_uuid.split('.')[0]
+                    self.__synthetic_sampling_parameters.set_endpoint_configured(endpoint_uuid)
+
+                results.append(True)
+        return results
+
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+        results = []
+        resolver = anytree.Resolver(pathattr='name')
+        with self.__lock:
+            for i,resource in enumerate(resources):
+                str_resource_name = 'resources[#{:d}]'.format(i)
+                try:
+                    chk_type(str_resource_name, resource, (list, tuple))
+                    chk_length(str_resource_name, resource, min_length=2, max_length=2)
+                    resource_key,_ = resource
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    resource_path = resource_key.split('/')
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Exception validating {:s}: {:s}'.format(str_resource_name, str(resource_key)))
+                    results.append(e) # if validation fails, store the exception
+                    continue
+
+                resource_node = get_subnode(resolver, self.__running, resource_path, default=None)
+                # if not found, resource_node is None
+                if resource_node is None:
+                    results.append(False)
+                    continue
+
+                match = RE_GET_ENDPOINT_FROM_INTERFACE.match(resource_key)
+                if match is not None:
+                    endpoint_uuid = match.group(1)
+                    if '.' in endpoint_uuid: endpoint_uuid = endpoint_uuid.split('.')[0]
+                    self.__synthetic_sampling_parameters.unset_endpoint_configured(endpoint_uuid)
+
+                parent = resource_node.parent
+                children = list(parent.children)
+                children.remove(resource_node)
+                parent.children = tuple(children)
+                results.append(True)
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        chk_type('subscriptions', subscriptions, list)
+        if len(subscriptions) == 0: return []
+        results = []
+        resolver = anytree.Resolver(pathattr='name')
+        with self.__lock:
+            for i,subscription in enumerate(subscriptions):
+                str_subscription_name = 'subscriptions[#{:d}]'.format(i)
+                try:
+                    chk_type(str_subscription_name, subscription, (list, tuple))
+                    chk_length(str_subscription_name, subscription, min_length=3, max_length=3)
+                    resource_key,sampling_duration,sampling_interval = subscription
+                    chk_string(str_subscription_name + '.resource_key', resource_key, allow_empty=False)
+                    resource_path = resource_key.split('/')
+                    chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0)
+                    chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0)
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key)))
+                    results.append(e) # if validation fails, store the exception
+                    continue
+
+                start_date,end_date = None,None
+                if sampling_duration >= 1.e-12:
+                    start_date = datetime.utcnow()
+                    end_date = start_date + timedelta(seconds=sampling_duration)
+
+                job_id = 'k={:s}/d={:f}/i={:f}'.format(resource_key, sampling_duration, sampling_interval)
+                job = self.__scheduler.add_job(
+                    do_sampling, args=(self.__synthetic_sampling_parameters, resource_key, self.__out_samples),
+                    kwargs={}, id=job_id, trigger='interval', seconds=sampling_interval, start_date=start_date,
+                    end_date=end_date, timezone=pytz.utc)
+
+                subscription_path = resource_path + ['{:.3f}:{:.3f}'.format(sampling_duration, sampling_interval)]
+                set_subnode_value(resolver, self.__subscriptions, subscription_path, job)
+                results.append(True)
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
+        chk_type('subscriptions', subscriptions, list)
+        if len(subscriptions) == 0: return []
+        results = []
+        resolver = anytree.Resolver(pathattr='name')
+        with self.__lock:
+            for i,resource in enumerate(subscriptions):
+                str_subscription_name = 'resources[#{:d}]'.format(i)
+                try:
+                    chk_type(str_subscription_name, resource, (list, tuple))
+                    chk_length(str_subscription_name, resource, min_length=3, max_length=3)
+                    resource_key,sampling_duration,sampling_interval = resource
+                    chk_string(str_subscription_name + '.resource_key', resource_key, allow_empty=False)
+                    resource_path = resource_key.split('/')
+                    chk_float(str_subscription_name + '.sampling_duration', sampling_duration, min_value=0)
+                    chk_float(str_subscription_name + '.sampling_interval', sampling_interval, min_value=0)
+                except Exception as e: # pylint: disable=broad-except
+                    LOGGER.exception('Exception validating {:s}: {:s}'.format(str_subscription_name, str(resource_key)))
+                    results.append(e) # if validation fails, store the exception
+                    continue
+
+                subscription_path = resource_path + ['{:.3f}:{:.3f}'.format(sampling_duration, sampling_interval)]
+                subscription_node = get_subnode(resolver, self.__subscriptions, subscription_path)
+
+                # if not found, resource_node is None
+                if subscription_node is None:
+                    results.append(False)
+                    continue
+
+                job : Job = getattr(subscription_node, 'value', None)
+                if job is None or not isinstance(job, Job):
+                    raise Exception('Malformed subscription node or wrong resource key: {:s}'.format(str(resource)))
+                job.remove()
+
+                parent = subscription_node.parent
+                children = list(parent.children)
+                children.remove(subscription_node)
+                parent.children = tuple(children)
+
+                results.append(True)
+        return results
+
+    def GetState(self, blocking=False, terminate : Optional[threading.Event] = None) -> Iterator[Tuple[str, Any]]:
+        while True:
+            if self.__terminate.is_set(): break
+            if terminate is not None and terminate.is_set(): break
+            try:
+                sample = self.__out_samples.get(block=blocking, timeout=0.1)
+            except queue.Empty:
+                if blocking: continue
+                return
+            if sample is None: continue
+            yield sample
diff --git a/src/device/service/drivers/pon_driver/SyntheticSamplingParameters.py b/src/device/service/drivers/pon_driver/SyntheticSamplingParameters.py
new file mode 100644
index 0000000000000000000000000000000000000000..e25e207e87256472a6bebf2da5601d409f189b1f
--- /dev/null
+++ b/src/device/service/drivers/pon_driver/SyntheticSamplingParameters.py
@@ -0,0 +1,87 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, math, queue, random, re, threading
+from datetime import datetime
+from typing import Optional, Tuple
+
+LOGGER = logging.getLogger(__name__)
+
+RE_GET_ENDPOINT_METRIC = re.compile(r'.*\/endpoint\[([^\]]+)\]\/state\/(.*)')
+
+MSG_ERROR_PARSE = '[get] unable to extract endpoint-metric from monitoring_resource_key "{:s}"'
+MSG_INFO = '[get] monitoring_resource_key={:s}, endpoint_uuid={:s}, metric={:s}, metric_sense={:s}'
+
+class SyntheticSamplingParameters:
+    def __init__(self) -> None:
+        self.__lock = threading.Lock()
+        self.__data = {}
+        self.__configured_endpoints = set()
+
+    def set_endpoint_configured(self, endpoint_uuid : str):
+        with self.__lock:
+            self.__configured_endpoints.add(endpoint_uuid)
+
+    def unset_endpoint_configured(self, endpoint_uuid : str):
+        with self.__lock:
+            self.__configured_endpoints.discard(endpoint_uuid)
+
+    def get(self, monitoring_resource_key : str) -> Optional[Tuple[float, float, float, float, float]]:
+        with self.__lock:
+            match = RE_GET_ENDPOINT_METRIC.match(monitoring_resource_key)
+            if match is None:
+                LOGGER.error(MSG_ERROR_PARSE.format(monitoring_resource_key))
+                return None
+            endpoint_uuid = match.group(1)
+
+            # If endpoint is not configured, generate a flat synthetic traffic aligned at 0
+            if endpoint_uuid not in self.__configured_endpoints: return (0, 0, 1, 0, 0)
+
+            metric = match.group(2)
+            metric_sense = metric.lower().replace('packets_', '').replace('bytes_', '')
+
+            LOGGER.debug(MSG_INFO.format(monitoring_resource_key, endpoint_uuid, metric, metric_sense))
+
+            parameters_key = '{:s}-{:s}'.format(endpoint_uuid, metric_sense)
+            parameters = self.__data.get(parameters_key)
+            if parameters is not None: return parameters
+
+            # assume packets
+            amplitude  = 1.e7 * random.random()
+            phase      = 60 * random.random()
+            period     = 3600 * random.random()
+            offset     = 1.e8 * random.random() + amplitude
+            avg_bytes_per_packet = random.randint(500, 1500)
+            parameters = (amplitude, phase, period, offset, avg_bytes_per_packet)
+            return self.__data.setdefault(parameters_key, parameters)
+
+def do_sampling(
+    synthetic_sampling_parameters : SyntheticSamplingParameters, monitoring_resource_key : str,
+    out_samples : queue.Queue
+) -> None:
+    parameters = synthetic_sampling_parameters.get(monitoring_resource_key)
+    if parameters is None: return
+    amplitude, phase, period, offset, avg_bytes_per_packet = parameters
+
+    if 'bytes' in monitoring_resource_key.lower():
+        # convert to bytes
+        amplitude = avg_bytes_per_packet * amplitude
+        offset = avg_bytes_per_packet * offset
+
+    # Use an explicit UTC timestamp; datetime.utcnow() is naive and would be
+    # interpreted as local time by datetime.timestamp()
+    timestamp = datetime.now(timezone.utc).timestamp()
+    waveform  = amplitude * math.sin(2 * math.pi * timestamp / period + phase) + offset
+    noise     = amplitude * random.random()
+    value     = abs(0.95 * waveform + 0.05 * noise)
+    out_samples.put_nowait((timestamp, monitoring_resource_key, value))
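+
+# Minimal usage sketch (hypothetical endpoint UUID 'ep-1', for illustration only):
+#
+#   ssp = SyntheticSamplingParameters()
+#   ssp.set_endpoint_configured('ep-1')
+#   samples : queue.Queue = queue.Queue()
+#   do_sampling(ssp, '/endpoints/endpoint[ep-1]/state/packets_transmitted', samples)
+#   timestamp, key, value = samples.get_nowait()  # one synthetic sample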
diff --git a/src/device/service/drivers/pon_driver/Tools.py b/src/device/service/drivers/pon_driver/Tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f2a105c0d9735f486f41fab5bc3069ec9327f65
--- /dev/null
+++ b/src/device/service/drivers/pon_driver/Tools.py
@@ -0,0 +1,89 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Dict, Optional, Tuple
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from common.type_checkers.Checkers import chk_attribute, chk_string, chk_type
+from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
+from .Constants import SPECIAL_RESOURCE_MAPPINGS
+
+LOGGER = logging.getLogger(__name__)
+
+def process_optional_string_field(
+    endpoint_data : Dict[str, Any], field_name : str, endpoint_resource_value : Dict[str, Any]
+) -> None:
+    field_value = chk_attribute(field_name, endpoint_data, 'endpoint_data', default=None)
+    if field_value is None: return
+    chk_string('endpoint_data.{:s}'.format(field_name), field_value)
+    if len(field_value) > 0: endpoint_resource_value[field_name] = field_value
+
+def compose_resource_endpoint(endpoint_data : Dict[str, Any]) -> Optional[Tuple[str, Dict]]:
+    try:
+        # Check type of endpoint_data
+        chk_type('endpoint_data', endpoint_data, dict)
+
+        # Check endpoint UUID (mandatory)
+        endpoint_uuid = chk_attribute('uuid', endpoint_data, 'endpoint_data')
+        chk_string('endpoint_data.uuid', endpoint_uuid, min_length=1)
+        endpoint_resource_path = SPECIAL_RESOURCE_MAPPINGS.get(RESOURCE_ENDPOINTS)
+        endpoint_resource_key = '{:s}/endpoint[{:s}]'.format(endpoint_resource_path, endpoint_uuid)
+        endpoint_resource_value = {'uuid': endpoint_uuid}
+
+        # Check endpoint optional string fields
+        process_optional_string_field(endpoint_data, 'name', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'type', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'context_uuid', endpoint_resource_value)
+        process_optional_string_field(endpoint_data, 'topology_uuid', endpoint_resource_value)
+
+        # Check endpoint sample types (optional)
+        endpoint_sample_types = chk_attribute('sample_types', endpoint_data, 'endpoint_data', default=[])
+        chk_type('endpoint_data.sample_types', endpoint_sample_types, list)
+        sample_types = {}
+        sample_type_errors = []
+        for i,endpoint_sample_type in enumerate(endpoint_sample_types):
+            field_name = 'endpoint_data.sample_types[{:d}]'.format(i)
+            try:
+                chk_type(field_name, endpoint_sample_type, (int, str))
+                if isinstance(endpoint_sample_type, int):
+                    metric_name = KpiSampleType.Name(endpoint_sample_type)
+                    metric_id = endpoint_sample_type
+                elif isinstance(endpoint_sample_type, str):
+                    metric_id = KpiSampleType.Value(endpoint_sample_type)
+                    metric_name = endpoint_sample_type
+                else:
+                    str_type = str(type(endpoint_sample_type))
+                    raise Exception('Bad format: {:s}'.format(str_type)) # pylint: disable=broad-exception-raised
+            except Exception as e: # pylint: disable=broad-exception-caught
+                MSG = 'Unsupported {:s}({:s}) : {:s}'
+                sample_type_errors.append(MSG.format(field_name, str(endpoint_sample_type), str(e)))
+                continue    # metric_name/metric_id are undefined after a parsing error
+
+            metric_name = metric_name.lower().replace('kpisampletype_', '')
+            monitoring_resource_key = '{:s}/state/{:s}'.format(endpoint_resource_key, metric_name)
+            sample_types[metric_id] = monitoring_resource_key
+
+        if len(sample_type_errors) > 0:
+            # pylint: disable=broad-exception-raised
+            raise Exception('Malformed Sample Types:\n{:s}'.format('\n'.join(sample_type_errors)))
+
+        if len(sample_types) > 0:
+            endpoint_resource_value['sample_types'] = sample_types
+
+        if 'location' in endpoint_data:
+            endpoint_resource_value['location'] = endpoint_data['location']
+
+        return endpoint_resource_key, endpoint_resource_value
+    except: # pylint: disable=bare-except
+        LOGGER.exception('Problem composing endpoint({:s})'.format(str(endpoint_data)))
+        return None
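+
+# Example (hypothetical values; assumes RESOURCE_ENDPOINTS maps to '/endpoints' and
+# that KpiSampleType value 101 is KPISAMPLETYPE_PACKETS_TRANSMITTED):
+#
+#   compose_resource_endpoint({'uuid': 'ep-1', 'type': 'copper', 'sample_types': [101]})
+#
+# returns, roughly:
+#
+#   ('/endpoints/endpoint[ep-1]',
+#    {'uuid': 'ep-1', 'type': 'copper',
+#     'sample_types': {101: '/endpoints/endpoint[ep-1]/state/packets_transmitted'}})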
diff --git a/src/device/service/drivers/pon_driver/__init__.py b/src/device/service/drivers/pon_driver/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14
--- /dev/null
+++ b/src/device/service/drivers/pon_driver/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/device/service/drivers/pon_driver/connectionRequest.py b/src/device/service/drivers/pon_driver/connectionRequest.py
new file mode 100644
index 0000000000000000000000000000000000000000..92a05021209ad2e4b601ca3107e2d2e6b4acfe93
--- /dev/null
+++ b/src/device/service/drivers/pon_driver/connectionRequest.py
@@ -0,0 +1,60 @@
+# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import logging
+import requests
+
+LOGGER = logging.getLogger(__name__)
+
+CONTROLLER_IP = "CONTROLLER-IP-ADDRESS"  # REPLACE
+CONTROLLER_PORT = 3333
+API_ENDPOINT = f"http://{CONTROLLER_IP}:{CONTROLLER_PORT}/api/service/"
+
+def connectionRequest(ont_id: str, cvlan: str, ethernet_port: str, svlan: str, profile: str, bw: str) -> bool:
+    """
+    Sends a POST request to the PON controller to establish the PON connection.
+    On error, logs the equivalent curl command for manual debugging.
+    """
+    payload = {
+        "ont_id": ont_id,
+        "cvlan": str(cvlan),
+        "ethernet_port": ethernet_port,
+        "svlan": str(svlan),
+        "profile": profile,
+        "bw": str(bw)
+    }
+    
+    headers = {
+        "accept": "application/json",
+        "Content-Type": "application/json"
+    }
+
+    curl_command = _generate_curl_command(payload, headers)
+    
+    # -------------------- UNCOMMENT ONCE THE CONTROLLER ENDPOINT IS REACHABLE --------------------
+    # try:
+    #     response = requests.post(API_ENDPOINT, headers=headers, json=payload)
+    #     
+    #     if response.status_code == 200:
+    #         LOGGER.info(f"PON Connection OK: {payload}")
+    #         return True
+    #         
+    #     LOGGER.error(f"Connection Error. CODE: {response.status_code} - RESPONSE: {response.text}")
+    #     LOGGER.info(f"CURL COMMAND: \n{curl_command}")
+    #     return False
+    #     
+    # except requests.RequestException as e:
+    #     LOGGER.error(f"Connection Error: {str(e)}")
+    #     LOGGER.info(f"CURL COMMAND: \n{curl_command}")
+    #     return False
+
+    print("CURL COMMAND:")
+    print(curl_command)
+    return False
+
+def _generate_curl_command(payload: dict, headers: dict) -> str:
+    header_string = " ".join([f"-H '{k}: {v}'" for k, v in headers.items()])
+    json_data = json.dumps(payload, indent=2).replace("'", "'\\''")
+    return (
+        f"curl -X POST '{API_ENDPOINT}' \\\n         {header_string} \\\n         -d $'{json_data}'"
+    )
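+
+# Example (hypothetical values, for illustration only):
+#
+#   connectionRequest('ont-1', '100', '1/1/x1', '200', 'ef', '500')
+#
+# currently returns False and logs a command along the lines of:
+#
+#   curl -X POST 'http://CONTROLLER-IP-ADDRESS:3333/api/service/' \
+#        -H 'accept: application/json' -H 'Content-Type: application/json' \
+#        -d $'{ "ont_id": "ont-1", "cvlan": "100", ... }'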
diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
index ea54f77978057af9ca99c700b5d1c7a46ed934ae..b5f5a93004c79157e12a97618825b90a1be975cb 100644
--- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py
+++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py
@@ -24,7 +24,7 @@ from pathcomp.frontend.Config import BACKEND_URL
 from .tools.EroPathToHops import eropath_to_hops
 from .tools.ComposeConfigRules import (
     compose_device_config_rules, compose_l2nm_config_rules, compose_l3nm_config_rules, compose_tapi_config_rules,
-    generate_neighbor_endpoint_config_rules
+    generate_neighbor_endpoint_config_rules, compose_pon_access_config_rules
 )
 from .tools.ComposeRequest import compose_device, compose_link, compose_service
 from .tools.ComputeSubServices import (
@@ -183,6 +183,10 @@ class _Algorithm:
             compose_l3nm_config_rules(config_rules, service.service_config.config_rules)
         elif service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
             compose_tapi_config_rules(config_rules, service.service_config.config_rules)
+        elif service_type == ServiceTypeEnum.SERVICETYPE_PON_ACCESS:
+            compose_pon_access_config_rules(config_rules, service.service_config.config_rules)
         else:
             MSG = 'Unhandled generic Config Rules for service {:s} {:s}'
             self.logger.warning(MSG.format(str(service_uuid), str(ServiceTypeEnum.Name(service_type))))
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
index 9eac4d353ef48f6e75b478d137e6e9bb9c3e1c03..96161350529c5d5db4d57aaff89901ad2bd6288d 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeConfigRules.py
@@ -108,6 +108,12 @@ def compose_tapi_config_rules(main_service_config_rules : List, subservice_confi
     for rule_name, defaults in CONFIG_RULES:
         compose_config_rules(main_service_config_rules, subservice_config_rules, rule_name, defaults)
 
+
+def compose_pon_access_config_rules(main_service_config_rules : List, subservice_config_rules : List) -> None:
+    CONFIG_RULES : List[Tuple[str, dict]] = [(SETTINGS_RULE_NAME, {})]
+    for rule_name, defaults in CONFIG_RULES:
+        compose_config_rules(main_service_config_rules, subservice_config_rules, rule_name, defaults)
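+    # Note: this reuses the generic SETTINGS_RULE_NAME with empty defaults; PON-specific
+    # defaults, if any, can be added to the dict in the CONFIG_RULES tuple above.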
+
 def compose_device_config_rules(
     config_rules : List, subservice_config_rules : List, path_hops : List,
     device_name_mapping : Dict[str, str], endpoint_name_mapping : Dict[Tuple[str, str], str]
@@ -157,6 +163,30 @@ def compose_device_config_rules(
             LOGGER.debug('[compose_device_config_rules]   adding acl config rule')
             subservice_config_rules.append(config_rule)
 
+        elif config_rule.WhichOneof('config_rule') == 'pon_access':
+            LOGGER.debug('[compose_device_config_rules]   is pon_access')
+            endpoint_id = config_rule.pon_access.endpoint_id
+            device_uuid_or_name = endpoint_id.device_id.device_uuid.uuid
+            LOGGER.debug('[compose_device_config_rules]   device_uuid_or_name={:s}'.format(str(device_uuid_or_name)))
+            device_name_or_uuid = device_name_mapping.get(device_uuid_or_name, device_uuid_or_name)
+            LOGGER.debug('[compose_device_config_rules]   device_name_or_uuid={:s}'.format(str(device_name_or_uuid)))
+            device_keys = {device_uuid_or_name, device_name_or_uuid}
+            if len(device_keys.intersection(devices_traversed)) == 0: continue
+
+            endpoint_uuid = endpoint_id.endpoint_uuid.uuid
+            LOGGER.debug('[compose_device_config_rules]   endpoint_uuid={:s}'.format(str(endpoint_uuid)))
+            endpoint_uuid_or_name = (endpoint_uuid[::-1].split('.', maxsplit=1)[-1])[::-1]
+            LOGGER.debug('[compose_device_config_rules]   endpoint_uuid_or_name={:s}'.format(str(endpoint_uuid_or_name)))
+            endpoint_name_or_uuid_1 = endpoint_name_mapping[(device_uuid_or_name, endpoint_uuid_or_name)]
+            endpoint_name_or_uuid_2 = endpoint_name_mapping[(device_name_or_uuid, endpoint_uuid_or_name)]
+            endpoint_keys = {endpoint_uuid_or_name, endpoint_name_or_uuid_1, endpoint_name_or_uuid_2}
+
+            device_endpoint_keys = set(itertools.product(device_keys, endpoint_keys))
+            if len(device_endpoint_keys.intersection(endpoints_traversed)) == 0: continue
+
+            LOGGER.debug('[compose_device_config_rules]   adding pon_access config rule')
+            subservice_config_rules.append(config_rule)
+
         elif config_rule.WhichOneof('config_rule') == 'custom':
             LOGGER.debug('[compose_device_config_rules]   is custom')
 
@@ -292,49 +322,56 @@ def generate_neighbor_endpoint_config_rules(
 
         for config_rule in config_rules:
             # Only applicable, by now, to Custom Config Rules for endpoint settings
-            if 'custom' not in config_rule: continue
-            match = RE_ENDPOINT_SETTINGS.match(config_rule['custom']['resource_key'])
-            if match is None:
-                match = RE_ENDPOINT_VLAN_SETTINGS.match(config_rule['custom']['resource_key'])
-            if match is None: continue
-
-            resource_key_values = match.groups()
-            if resource_key_values[0:2] in device_endpoint_keys_a:
-                resource_key_values = list(resource_key_values)
-                resource_key_values[0] = link_endpoint_b['device']
-                resource_key_values[1] = link_endpoint_b['ingress_ep']
-            elif resource_key_values[0:2] in device_endpoint_keys_b:
-                resource_key_values = list(resource_key_values)
-                resource_key_values[0] = link_endpoint_a['device']
-                resource_key_values[1] = link_endpoint_a['egress_ep']
+            if 'custom' not in config_rule and 'pon_access' not in config_rule: continue
+            if 'custom' in config_rule:
+                match = RE_ENDPOINT_SETTINGS.match(config_rule['custom']['resource_key'])
+                if match is None:
+                    match = RE_ENDPOINT_VLAN_SETTINGS.match(config_rule['custom']['resource_key'])
+                if match is None: continue
+                resource_key_values = match.groups()
+                if resource_key_values[0:2] in device_endpoint_keys_a:
+                    resource_key_values = list(resource_key_values)
+                    resource_key_values[0] = link_endpoint_b['device']
+                    resource_key_values[1] = link_endpoint_b['ingress_ep']
+                elif resource_key_values[0:2] in device_endpoint_keys_b:
+                    resource_key_values = list(resource_key_values)
+                    resource_key_values[0] = link_endpoint_a['device']
+                    resource_key_values[1] = link_endpoint_a['egress_ep']
+                else:
+                    continue
+
+                device_keys = compute_device_keys(resource_key_values[0], device_name_mapping)
+                device_names = {device_key for device_key in device_keys if RE_UUID.match(device_key) is None}
+                if len(device_names) != 1:
+                    MSG = 'Unable to identify name for Device({:s}): device_keys({:s})'
+                    raise Exception(MSG.format(str(resource_key_values[0]), str(device_keys)))
+                resource_key_values[0] = device_names.pop()
+
+                endpoint_keys = compute_endpoint_keys(device_keys, resource_key_values[1], endpoint_name_mapping)
+                endpoint_names = {endpoint_key for endpoint_key in endpoint_keys if RE_UUID.match(endpoint_key) is None}
+                if len(endpoint_names) != 1:
+                    MSG = 'Unable to identify name for Endpoint({:s}): endpoint_keys({:s})'
+                    raise Exception(MSG.format(str(resource_key_values[1]), str(endpoint_keys)))
+                resource_key_values[1] = endpoint_names.pop()
+
+                resource_value : Dict = json.loads(config_rule['custom']['resource_value'])
+                if 'neighbor_address' not in resource_value: continue
+                resource_value['ip_address'] = resource_value.pop('neighbor_address')
+
+                # remove neighbor_address also from original rule as it is already consumed
+
+                resource_key_template = TMPL_ENDPOINT_VLAN_SETTINGS if len(match.groups()) == 3 else TMPL_ENDPOINT_SETTINGS
+                generated_config_rule = copy.deepcopy(config_rule)
+                generated_config_rule['custom']['resource_key'] = resource_key_template.format(*resource_key_values)
+                generated_config_rule['custom']['resource_value'] = json.dumps(resource_value)
+                generated_config_rules.append(generated_config_rule)
             else:
-                continue
-
-            device_keys = compute_device_keys(resource_key_values[0], device_name_mapping)
-            device_names = {device_key for device_key in device_keys if RE_UUID.match(device_key) is None}
-            if len(device_names) != 1:
-                MSG = 'Unable to identify name for Device({:s}): device_keys({:s})'
-                raise Exception(MSG.format(str(resource_key_values[0]), str(device_keys)))
-            resource_key_values[0] = device_names.pop()
-
-            endpoint_keys = compute_endpoint_keys(device_keys, resource_key_values[1], endpoint_name_mapping)
-            endpoint_names = {endpoint_key for endpoint_key in endpoint_keys if RE_UUID.match(endpoint_key) is None}
-            if len(endpoint_names) != 1:
-                MSG = 'Unable to identify name for Endpoint({:s}): endpoint_keys({:s})'
-                raise Exception(MSG.format(str(resource_key_values[1]), str(endpoint_keys)))
-            resource_key_values[1] = endpoint_names.pop()
-
-            resource_value : Dict = json.loads(config_rule['custom']['resource_value'])
-            if 'neighbor_address' not in resource_value: continue
-            resource_value['ip_address'] = resource_value.pop('neighbor_address')
-
-            # remove neighbor_address also from original rule as it is already consumed
-
-            resource_key_template = TMPL_ENDPOINT_VLAN_SETTINGS if len(match.groups()) == 3 else TMPL_ENDPOINT_SETTINGS
-            generated_config_rule = copy.deepcopy(config_rule)
-            generated_config_rule['custom']['resource_key'] = resource_key_template.format(*resource_key_values)
-            generated_config_rule['custom']['resource_value'] = json.dumps(resource_value)
-            generated_config_rules.append(generated_config_rule)
+                LOGGER.debug('[generate_neighbor_endpoint_config_rules] PON_ACCESS: {:s}'.format(str(config_rule)))
+                # pon_access rules are bound to a specific endpoint_id, so the rule
+                # is replicated for the neighbor endpoint unchanged
+                generated_config_rule = copy.deepcopy(config_rule)
+                generated_config_rules.append(generated_config_rule)
 
     LOGGER.debug('[generate_neighbor_endpoint_config_rules] generated_config_rules={:s}'.format(str(generated_config_rules)))
     LOGGER.debug('[generate_neighbor_endpoint_config_rules] end')
diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
index ae567d9d65e4d971930fecb0971672f5bdb1ab73..0e298c7e9de5f289604a84964f6d06046f9ed218 100644
--- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
+++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py
@@ -42,13 +42,14 @@ OPTICAL_DEVICE_TYPES = {
     DeviceTypeEnum.OPTICAL_TRANSPONDER, DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER,
 }
 
-SERVICE_TYPE_L2NM = {ServiceTypeEnum.SERVICETYPE_L2NM}
-SERVICE_TYPE_L3NM = {ServiceTypeEnum.SERVICETYPE_L3NM}
-SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM}
-SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE}
+SERVICE_TYPE_L2NM       = {ServiceTypeEnum.SERVICETYPE_L2NM}
+SERVICE_TYPE_L3NM       = {ServiceTypeEnum.SERVICETYPE_L3NM}
+SERVICE_TYPE_LXNM       = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM}
+SERVICE_TYPE_TAPI       = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE}
+SERVICE_TYPE_PON_ACCESS = {ServiceTypeEnum.SERVICETYPE_PON_ACCESS}
 
 def get_service_type(device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum) -> ServiceTypeEnum:
-    if device_type in PACKET_DEVICE_TYPES and prv_service_type in SERVICE_TYPE_LXNM: return prv_service_type
+    if device_type in PACKET_DEVICE_TYPES and (prv_service_type in SERVICE_TYPE_LXNM or prv_service_type in SERVICE_TYPE_PON_ACCESS): return prv_service_type
     if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM
     if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE
     if device_type in NETWORK_DEVICE_TYPES: return prv_service_type
diff --git a/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java b/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java
index e3265ff32e3e1f93a3bfd3007aff7e80d3154d8f..eab1c499477bf8be402ac2e3ec85c221aa06de04 100644
--- a/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java
+++ b/src/policy/src/main/java/org/etsi/tfs/policy/Serializer.java
@@ -1177,6 +1177,8 @@ public class Serializer {
                 return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_L3NM;
             case TAPI_CONNECTIVITY_SERVICE:
                 return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE;
+            case PON_ACCESS:
+                return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_PON_ACCESS;
             case UNKNOWN:
                 return ContextOuterClass.ServiceTypeEnum.SERVICETYPE_UNKNOWN;
             default:
@@ -1192,6 +1194,8 @@ public class Serializer {
                 return ServiceTypeEnum.L3NM;
             case SERVICETYPE_TAPI_CONNECTIVITY_SERVICE:
                 return ServiceTypeEnum.TAPI_CONNECTIVITY_SERVICE;
+            case SERVICETYPE_PON_ACCESS:
+                return ServiceTypeEnum.PON_ACCESS;
             case SERVICETYPE_UNKNOWN:
             case UNRECOGNIZED:
             default:
diff --git a/src/policy/src/main/java/org/etsi/tfs/policy/context/model/ServiceTypeEnum.java b/src/policy/src/main/java/org/etsi/tfs/policy/context/model/ServiceTypeEnum.java
index d09c6da0614c76a29979277c6188c33f857e2346..d0c5c1e43efd4e3d7c2f1dab05f1c549fa55cd3e 100644
--- a/src/policy/src/main/java/org/etsi/tfs/policy/context/model/ServiceTypeEnum.java
+++ b/src/policy/src/main/java/org/etsi/tfs/policy/context/model/ServiceTypeEnum.java
@@ -20,5 +20,6 @@ public enum ServiceTypeEnum {
     UNKNOWN,
     L3NM,
     L2NM,
-    TAPI_CONNECTIVITY_SERVICE
+    TAPI_CONNECTIVITY_SERVICE,
+    PON_ACCESS
 }
diff --git a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
index d41b80f1d0efdbd5a345c1ebf41c8c25fb7157f7..555efb8e10ccd591e37f08a40ebb042746e6479c 100644
--- a/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
+++ b/src/policy/target/generated-sources/grpc/context/ContextOuterClass.java
@@ -519,6 +519,7 @@ public final class ContextOuterClass {
          * <code>SERVICETYPE_QKD = 7;</code>
          */
         SERVICETYPE_QKD(7),
+        /**
+         * <code>SERVICETYPE_PON_ACCESS = 8;</code>
+         */
+        SERVICETYPE_PON_ACCESS(8),
         UNRECOGNIZED(-1);
 
         /**
diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py
index e47fd635f0c02667a052ebb9cff0569496c5fbec..acef98e5b963790b9421b662d996b3a4172f9c8c 100644
--- a/src/service/service/service_handler_api/FilterFields.py
+++ b/src/service/service/service_handler_api/FilterFields.py
@@ -28,6 +28,7 @@ SERVICE_TYPE_VALUES = {
     ServiceTypeEnum.SERVICETYPE_E2E,
     ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY,
     ServiceTypeEnum.SERVICETYPE_QKD,
+    ServiceTypeEnum.SERVICETYPE_PON_ACCESS,
 }
 
 DEVICE_DRIVER_VALUES = {
@@ -48,6 +49,7 @@ DEVICE_DRIVER_VALUES = {
     DeviceDriverEnum.DEVICEDRIVER_OC,
     DeviceDriverEnum.DEVICEDRIVER_QKD,
     DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN,
+    DeviceDriverEnum.DEVICEDRIVER_PON,
 }
 
 # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified
diff --git a/src/service/service/service_handler_api/SettingsHandler.py b/src/service/service/service_handler_api/SettingsHandler.py
index 5607173ac86072b21312defb1299e9113d6064e3..5e5084d69f98f042cedd464601f8dbd762f41f1a 100644
--- a/src/service/service/service_handler_api/SettingsHandler.py
+++ b/src/service/service/service_handler_api/SettingsHandler.py
@@ -47,6 +47,13 @@ class SettingsHandler:
             ACL_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/index[{:d}]/acl_ruleset[{:s}]'
             key_or_path = ACL_KEY_TEMPLATE.format(device_uuid, endpoint_name,endpoint_index, acl_ruleset_name)
             value = grpc_message_to_json(config_rule.acl)
+        elif kind == 'pon_access':
+            device_uuid = config_rule.pon_access.endpoint_id.device_id.device_uuid.uuid
+            endpoint_uuid = config_rule.pon_access.endpoint_id.endpoint_uuid.uuid
+            endpoint_name, endpoint_index = extract_endpoint_index(endpoint_uuid)
+            PON_ACCESS_KEY_TEMPLATE = '/device[{:s}]/endpoint[{:s}]/subindex[{:d}]/pon_access'
+            key_or_path = PON_ACCESS_KEY_TEMPLATE.format(device_uuid, endpoint_name, endpoint_index)
+            value = config_rule.pon_access
         else:
             MSG = 'Unsupported Kind({:s}) in ConfigRule({:s})'
             LOGGER.warning(MSG.format(str(kind), grpc_message_to_json_string(config_rule)))
@@ -105,7 +112,29 @@ class SettingsHandler:
                     if not 'index[{:d}]'.format(acl_index) in res_key: continue
                     acl_rules.append((res_key, res_value))
         return acl_rules
-
+
+    def get_endpoint_pon_access(self, device : Device, endpoint : EndPoint) -> List[Tuple]:
+        endpoint_name = endpoint.name
+        device_keys   = device.device_id.device_uuid.uuid,       device.name
+        endpoint_keys = endpoint.endpoint_id.endpoint_uuid.uuid, endpoint.name
+        pon_accesses = []
+        for device_key in device_keys:
+            for endpoint_key in endpoint_keys:
+                endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]'.format(device_key, endpoint_key)
+                endpoint_settings = self.get(endpoint_settings_uri)
+                if endpoint_settings is None: continue
+                PON_ACCESS_KEY_PREFIX = '/device[{:s}]/endpoint[{:s}]/'.format(device_key, endpoint_name)
+
+                results = dump_subtree(endpoint_settings)
+                for res_key, res_value in results:
+                    if not res_key.startswith(PON_ACCESS_KEY_PREFIX): continue
+                    if 'pon_access' not in res_key: continue
+                    # append each matching rule only once, after validating its subindex
+                    ponaccess_index = extract_index(res_value)
+                    if not 'subindex[{:d}]'.format(ponaccess_index) in res_key: continue
+                    pon_accesses.append((res_key, res_value))
+        return pon_accesses
+
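+    # Matching resource keys follow the pattern built above, e.g. (hypothetical values):
+    #   /device[dev-1]/endpoint[ep-1]/subindex[0]/pon_access
+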
     def set(self, key_or_path : Union[str, List[str]], value : Any) -> None:
         set_subnode_value(self.__resolver, self.__config, key_or_path, value)
 
diff --git a/src/service/service/service_handler_api/Tools.py b/src/service/service/service_handler_api/Tools.py
index 35c439ae176ac67adb8933fadc7ba12a703ce80b..425825793644b3422fc538c75b6997886d7ff232 100644
--- a/src/service/service/service_handler_api/Tools.py
+++ b/src/service/service/service_handler_api/Tools.py
@@ -68,9 +68,10 @@ def extract_endpoint_index(endpoint_name : str, default_index=0) -> Tuple[str, i
     if index is not None: index = int(index)
     return endpoint_name, index
 
-def extract_index(res_value : str) ->  int:
-    acl_value = grpc_message_to_json(res_value,use_integers_for_enums=True) 
-    endpoint  = acl_value.split("'endpoint_uuid': {'uuid': '")
-    endpoint  = endpoint[1].split("'}")
-    _ , index = extract_endpoint_index(endpoint[0])
+
+def extract_index(res_value : Any) -> int:
+    # Convert the gRPC message to JSON and read the endpoint UUID field directly.
+    res_json      = grpc_message_to_json(res_value, use_integers_for_enums=True)
+    endpoint_uuid = res_json['endpoint_id']['endpoint_uuid']['uuid']
+    _, index      = extract_endpoint_index(endpoint_uuid)
     return index
diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py
index 85545d238f2b93bd77b1beb1fce2d46b01b06800..b69f714f3bb55dcf0a4aa573e8b498ae3c1723b7 100644
--- a/src/service/service/service_handlers/__init__.py
+++ b/src/service/service/service_handlers/__init__.py
@@ -31,6 +31,8 @@ from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler
 from .e2e_orch.E2EOrchestratorServiceHandler import E2EOrchestratorServiceHandler
 from .oc.OCServiceHandler import OCServiceHandler
 from .qkd.qkd_service_handler import QKDServiceHandler
+from .pon_access.PON_AccessServiceHandler import PON_AccessServiceHandler
 
 SERVICE_HANDLERS = [
     (L2NMEmulatedServiceHandler, [
@@ -129,6 +131,15 @@ SERVICE_HANDLERS = [
             FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_OC,
         }
     ]),
+
+    (PON_AccessServiceHandler, [
+        {
+            FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_PON_ACCESS,
+            FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_PON,
+        }
+    ]),
+
     (QKDServiceHandler, [
         {
             FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_QKD,
diff --git a/src/service/service/service_handlers/pon_access/ConfigRules.py b/src/service/service/service_handlers/pon_access/ConfigRules.py
new file mode 100644
index 0000000000000000000000000000000000000000..60d1b04cd320097af9b1cd5b629548ec6d710513
--- /dev/null
+++ b/src/service/service/service_handlers/pon_access/ConfigRules.py
@@ -0,0 +1,58 @@
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any, Dict, List, Optional, Tuple
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from service.service.service_handler_api.AnyTreeTools import TreeNode
+LOGGER = logging.getLogger(__name__)
+
+def get_value(field_name : str, *containers, default=None) -> Optional[Any]:
+    if len(containers) == 0: raise Exception('No containers specified')
+    for container in containers:
+        if field_name not in container: continue
+        return container[field_name]
+    return default
+
+def setup_config_rules(
+    endpoint_name : str, endpoint_pon_access : List[Tuple]
+) -> List[Dict]:
+    json_config_rules = []
+    for _res_key, res_value in endpoint_pon_access:
+        # action=1 corresponds to ConfigActionEnum.CONFIGACTION_SET
+        json_config_rules.append({'action': 1, 'pon_access': res_value})
+    return json_config_rules
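+
+# Example of the rules produced (hypothetical key and value, for illustration):
+#   setup_config_rules('ep-1', [('/device[dev-1]/endpoint[ep-1]/subindex[0]/pon_access', rule_msg)])
+#   -> [{'action': 1, 'pon_access': rule_msg}]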
+
+def teardown_config_rules(
+    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+
+    if service_settings  is None: return []
+    if device_settings   is None: return []
+    if endpoint_settings is None: return []
+
+    json_settings          : Dict = service_settings.value
+    json_device_settings   : Dict = device_settings.value
+    json_endpoint_settings : Dict = endpoint_settings.value
+
+    settings = (json_settings, json_endpoint_settings, json_device_settings)
+
+    # no PON-specific teardown rules are generated yet
+    json_config_rules = []
+    return json_config_rules
\ No newline at end of file
diff --git a/src/service/service/service_handlers/pon_access/PON_AccessServiceHandler.py b/src/service/service/service_handlers/pon_access/PON_AccessServiceHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..86fd1f07a3f012f224a9c783b46f0085b581cf0a
--- /dev/null
+++ b/src/service/service/service_handlers/pon_access/PON_AccessServiceHandler.py
@@ -0,0 +1,157 @@
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+from .ConfigRules import setup_config_rules, teardown_config_rules
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'pon_access'})
+
+class PON_AccessServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(service.service_config, **settings)
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        results = []
+        for endpoint in endpoints:
+            try:
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+
+                device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
+                endpoint_pon_access = self.__settings_handler.get_endpoint_pon_access(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
+                json_config_rules = setup_config_rules(
+                    endpoint_name, endpoint_pon_access)
+
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        settings = self.__settings_handler.get('/settings')
+
+        results = []
+        for endpoint in endpoints:
+            try:
+                device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+
+                device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+                device_settings = self.__settings_handler.get_device_settings(device_obj)
+                endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
+                endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj)
+                endpoint_name = endpoint_obj.name
+
+                json_config_rules = teardown_config_rules(
+                    service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name,
+                    settings, device_settings, endpoint_settings)
+
+                if len(json_config_rules) > 0:
+                    del device_obj.device_config.config_rules[:]
+                    for json_config_rule in json_config_rules:
+                        device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule))
+                    self.__task_executor.configure_device(device_obj)
+
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                resource_value = json.loads(resource[1])
+                self.__settings_handler.set(resource[0], resource_value)
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        results = []
+        for resource in resources:
+            try:
+                self.__settings_handler.delete(resource[0])
+                results.append(True)
+            except Exception as e: # pylint: disable=broad-except
+                LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource)))
+                results.append(e)
+
+        return results
\ No newline at end of file
diff --git a/src/service/service/service_handlers/pon_access/__init__.py b/src/service/service/service_handlers/pon_access/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..906dd19f3c948b03263251f60addb49e2fb522dc
--- /dev/null
+++ b/src/service/service/service_handlers/pon_access/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2025 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/webui/service/static/topology_icons/pon-controller.png b/src/webui/service/static/topology_icons/pon-controller.png
new file mode 100644
index 0000000000000000000000000000000000000000..83fc862e82ba003f8c7a48c18f5aff912f4a78ae
Binary files /dev/null and b/src/webui/service/static/topology_icons/pon-controller.png differ