diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py
index bddc8ee6e3d9114754b4076d483ce8c9c62ee875..a17ed309189148637714a2076a7689446907e78f 100644
--- a/src/service/service/service_handlers/__init__.py
+++ b/src/service/service/service_handlers/__init__.py
@@ -26,6 +26,7 @@ from .l3nm_nce.L3NMNCEServiceHandler import L3NMNCEServiceHandler
 from .l3slice_ietfslice.L3SliceIETFSliceServiceHandler import L3NMSliceIETFSliceServiceHandler
 from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler
 from .p4_dummy_l1.p4_dummy_l1_service_handler import P4DummyL1ServiceHandler
+from .p4_fabric_tna_int.p4_fabric_tna_int_service_handler import P4FabricINTServiceHandler
 from .tapi_tapi.TapiServiceHandler import TapiServiceHandler
 from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler
 from .e2e_orch.E2EOrchestratorServiceHandler import E2EOrchestratorServiceHandler
@@ -111,6 +112,12 @@ SERVICE_HANDLERS = [
             FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4,
         }
     ]),
+    (P4FabricINTServiceHandler, [
+        {
+            FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_INT,
+            FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4,
+        }
+    ]),
     (L2NM_IETFL2VPN_ServiceHandler, [
         {
             FilterFieldEnum.SERVICE_TYPE  : ServiceTypeEnum.SERVICETYPE_L2NM,
diff --git a/src/service/service/service_handlers/p4_fabric_tna_int/__init__.py b/src/service/service/service_handlers/p4_fabric_tna_int/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2
--- /dev/null
+++ b/src/service/service/service_handlers/p4_fabric_tna_int/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_config.py b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fb22ee97d80ec03e7eca87e29772cc71141b1ff
--- /dev/null
+++ b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_config.py
@@ -0,0 +1,425 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common objects and methods for In-band Network Telemetry (INT) dataplane
+based on the SD-Fabric dataplane model.
+This dataplane covers both software-based and hardware-based Stratum-enabled P4 switches,
+such as the BMv2 software switch and Intel's Tofino/Tofino-2 switches.
+
+SD-Fabric repo: https://github.com/stratum/fabric-tna
+SD-Fabric docs: https://docs.sd-fabric.org/master/index.html
+"""
+
+import logging
+from typing import List, Tuple
+from common.proto.context_pb2 import ConfigActionEnum
+from common.tools.object_factory.ConfigRule import json_config_rule
+from common.type_checkers.Checkers import chk_address_ipv4, chk_transport_port
+
+from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import *
+
+LOGGER = logging.getLogger(__name__)
+
+# INT service handler settings
+INT_COLLECTOR_INFO = "int_collector_info"
+INT_REPORT_MIRROR_ID_LIST = "int_report_mirror_id_list"
+PORT_INT = "int_port"           # In-band Network Telemetry transport port (of the collector)
+
+# INT tables
+TABLE_INT_WATCHLIST = "FabricIngress.int_watchlist.watchlist"
+TABLE_INT_EGRESS_REPORT = "FabricEgress.int_egress.report"
+
+# Mirror IDs for INT reports
+INT_REPORT_MIRROR_ID_LIST_TNA = [0x200, 0x201, 0x202, 0x203]  # Tofino-2 (2-pipe Tofino switches use only the first 2 entries)
+INT_REPORT_MIRROR_ID_LIST_V1MODEL = [0x1FA]                   # Variable V1MODEL_INT_MIRROR_SESSION in the P4 source program
+
+# INT report types
+INT_REPORT_TYPE_NO_REPORT = 0
+INT_REPORT_TYPE_FLOW = 1
+INT_REPORT_TYPE_QUEUE = 2
+INT_REPORT_TYPE_DROP = 4
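+# These report types are bit flags and can be OR-ed to match combined report types
+# (e.g., INT_REPORT_TYPE_QUEUE | INT_REPORT_TYPE_FLOW).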
+
+
+def rules_set_up_int_watchlist(action : ConfigActionEnum) -> List[Tuple]: # type: ignore
+    rule_no = cache_rule(TABLE_INT_WATCHLIST, action)
+
+    rules_int_watchlist = []
+    rules_int_watchlist.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_WATCHLIST+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_WATCHLIST,
+                'match-fields': [
+                    {
+                        'match-field': 'ipv4_valid',
+                        'match-value': '1'
+                    }
+                ],
+                'action-name': 'FabricIngress.int_watchlist.mark_to_report',
+                'action-params': [],
+                'priority': 1
+            }
+        )
+    )
+
+    return rules_int_watchlist
+
+def rules_set_up_int_report_collector(
+        int_collector_ip : str,
+        action : ConfigActionEnum) -> List[Tuple]: # type: ignore
+    assert chk_address_ipv4(int_collector_ip), "Invalid INT collector IPv4 address to configure watchlist"
+
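+    # This entry has a higher priority (10) than the generic watchlist entry (1),
+    # so traffic destined to the INT collector itself is not reported.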
+    rule_no = cache_rule(TABLE_INT_WATCHLIST, action)
+
+    rules_int_col_report = []
+    rules_int_col_report.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_WATCHLIST+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_WATCHLIST,
+                'match-fields': [
+                    {
+                        'match-field': 'ipv4_valid',
+                        'match-value': '1'
+                    },
+                    {
+                        'match-field': 'ipv4_dst',
+                        'match-value': int_collector_ip+'&&&0xFFFFFFFF'
+                    }
+                ],
+                'action-name': 'FabricIngress.int_watchlist.no_report_collector',
+                'action-params': [],
+                'priority': 10
+            }
+        )
+    )
+
+    return rules_int_col_report
+
+def rules_set_up_int_recirculation_ports(
+        recirculation_port_list : List,
+        port_type : str,
+        fwd_type : int,
+        vlan_id : int,
+        action : ConfigActionEnum): # type: ignore
+    rules_list = []
+
+    for port in recirculation_port_list:
+        rules_list.extend(
+            rules_set_up_port(
+                port=port,
+                port_type=port_type,
+                fwd_type=fwd_type,
+                vlan_id=vlan_id,
+                action=action
+            )
+        )
+
+    LOGGER.debug("INT recirculation ports configured:{}".format(recirculation_port_list))
+
+    return rules_list
+
+def rules_set_up_int_report_flow(
+        switch_id : int,
+        src_ip : str,
+        int_collector_ip : str,
+        int_collector_port : int,
+        action : ConfigActionEnum) -> List[Tuple]: # type: ignore
+    assert switch_id > 0, "Invalid switch identifier to configure egress INT report"
+    assert chk_address_ipv4(src_ip), "Invalid source IPv4 address to configure egress INT report"
+    assert chk_address_ipv4(int_collector_ip), "Invalid INT collector IPv4 address to configure egress INT report"
+    assert chk_transport_port(int_collector_port), "Invalid INT collector port number to configure egress INT report"
+
+    rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action)
+
+    rules_int_egress = []
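+    # The six entries below cover the (bmd_type, mirror_type, int_report_type)
+    # combinations: ingress and deflected drops, plus mirrored drop, flow, queue,
+    # and combined queue+flow reports.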
+
+    # Rule #1
+    rules_int_egress.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_EGRESS_REPORT,
+                'match-fields': [
+                    {
+                        'match-field': 'bmd_type',
+                        'match-value': str(BRIDGED_MD_TYPE_INT_INGRESS_DROP)
+                    },
+                    {
+                        'match-field': 'mirror_type',
+                        'match-value': str(MIRROR_TYPE_INVALID)
+                    },
+                    {
+                        'match-field': 'int_report_type',
+                        'match-value': str(INT_REPORT_TYPE_DROP)
+                    }
+                ],
+                'action-name': 'FabricEgress.int_egress.do_drop_report_encap',
+                'action-params': [
+                    {
+                        'action-param': 'switch_id',
+                        'action-value': str(switch_id)
+                    },
+                    {
+                        'action-param': 'src_ip',
+                        'action-value': src_ip
+                    },
+                    {
+                        'action-param': 'mon_ip',
+                        'action-value': int_collector_ip
+                    },
+                    {
+                        'action-param': 'mon_port',
+                        'action-value': str(int_collector_port)
+                    }
+                ]
+            }
+        )
+    )
+
+    rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action)
+
+    # Rule #2
+    rules_int_egress.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_EGRESS_REPORT,
+                'match-fields': [
+                    {
+                        'match-field': 'bmd_type',
+                        'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR)
+                    },
+                    {
+                        'match-field': 'mirror_type',
+                        'match-value': str(MIRROR_TYPE_INT_REPORT)
+                    },
+                    {
+                        'match-field': 'int_report_type',
+                        'match-value': str(INT_REPORT_TYPE_DROP)
+                    }
+                ],
+                'action-name': 'FabricEgress.int_egress.do_drop_report_encap',
+                'action-params': [
+                    {
+                        'action-param': 'switch_id',
+                        'action-value': str(switch_id)
+                    },
+                    {
+                        'action-param': 'src_ip',
+                        'action-value': src_ip
+                    },
+                    {
+                        'action-param': 'mon_ip',
+                        'action-value': int_collector_ip
+                    },
+                    {
+                        'action-param': 'mon_port',
+                        'action-value': str(int_collector_port)
+                    }
+                ]
+            }
+        )
+    )
+
+    rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action)
+
+    # Rule #3
+    rules_int_egress.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_EGRESS_REPORT,
+                'match-fields': [
+                    {
+                        'match-field': 'bmd_type',
+                        'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR)
+                    },
+                    {
+                        'match-field': 'mirror_type',
+                        'match-value': str(MIRROR_TYPE_INT_REPORT)
+                    },
+                    {
+                        'match-field': 'int_report_type',
+                        'match-value': str(INT_REPORT_TYPE_FLOW)
+                    }
+                ],
+                'action-name': 'FabricEgress.int_egress.do_local_report_encap',
+                'action-params': [
+                    {
+                        'action-param': 'switch_id',
+                        'action-value': str(switch_id)
+                    },
+                    {
+                        'action-param': 'src_ip',
+                        'action-value': src_ip
+                    },
+                    {
+                        'action-param': 'mon_ip',
+                        'action-value': int_collector_ip
+                    },
+                    {
+                        'action-param': 'mon_port',
+                        'action-value': str(int_collector_port)
+                    }
+                ]
+            }
+        )
+    )
+
+    rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action)
+
+    # Rule #4
+    rules_int_egress.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_EGRESS_REPORT,
+                'match-fields': [
+                    {
+                        'match-field': 'bmd_type',
+                        'match-value': str(BRIDGED_MD_TYPE_DEFLECTED)
+                    },
+                    {
+                        'match-field': 'mirror_type',
+                        'match-value': str(MIRROR_TYPE_INVALID)
+                    },
+                    {
+                        'match-field': 'int_report_type',
+                        'match-value': str(INT_REPORT_TYPE_DROP)
+                    }
+                ],
+                'action-name': 'FabricEgress.int_egress.do_drop_report_encap',
+                'action-params': [
+                    {
+                        'action-param': 'switch_id',
+                        'action-value': str(switch_id)
+                    },
+                    {
+                        'action-param': 'src_ip',
+                        'action-value': src_ip
+                    },
+                    {
+                        'action-param': 'mon_ip',
+                        'action-value': int_collector_ip
+                    },
+                    {
+                        'action-param': 'mon_port',
+                        'action-value': str(int_collector_port)
+                    }
+                ]
+            }
+        )
+    )
+
+    rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action)
+
+    # Rule #5
+    rules_int_egress.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_EGRESS_REPORT,
+                'match-fields': [
+                    {
+                        'match-field': 'bmd_type',
+                        'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR)
+                    },
+                    {
+                        'match-field': 'mirror_type',
+                        'match-value': str(MIRROR_TYPE_INT_REPORT)
+                    },
+                    {
+                        'match-field': 'int_report_type',
+                        'match-value': str(INT_REPORT_TYPE_QUEUE)
+                    }
+                ],
+                'action-name': 'FabricEgress.int_egress.do_local_report_encap',
+                'action-params': [
+                    {
+                        'action-param': 'switch_id',
+                        'action-value': str(switch_id)
+                    },
+                    {
+                        'action-param': 'src_ip',
+                        'action-value': src_ip
+                    },
+                    {
+                        'action-param': 'mon_ip',
+                        'action-value': int_collector_ip
+                    },
+                    {
+                        'action-param': 'mon_port',
+                        'action-value': str(int_collector_port)
+                    }
+                ]
+            }
+        )
+    )
+
+    rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action)
+
+    # Rule #6
+    rules_int_egress.append(
+        json_config_rule(
+            action,
+            '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']',
+            {
+                'table-name': TABLE_INT_EGRESS_REPORT,
+                'match-fields': [
+                    {
+                        'match-field': 'bmd_type',
+                        'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR)
+                    },
+                    {
+                        'match-field': 'mirror_type',
+                        'match-value': str(MIRROR_TYPE_INT_REPORT)
+                    },
+                    {
+                        'match-field': 'int_report_type',
+                        'match-value': str(INT_REPORT_TYPE_QUEUE | INT_REPORT_TYPE_FLOW)
+                    }
+                ],
+                'action-name': 'FabricEgress.int_egress.do_local_report_encap',
+                'action-params': [
+                    {
+                        'action-param': 'switch_id',
+                        'action-value': str(switch_id)
+                    },
+                    {
+                        'action-param': 'src_ip',
+                        'action-value': src_ip
+                    },
+                    {
+                        'action-param': 'mon_ip',
+                        'action-value': int_collector_ip
+                    },
+                    {
+                        'action-param': 'mon_port',
+                        'action-value': str(int_collector_port)
+                    }
+                ]
+            }
+        )
+    )
+
+    return rules_int_egress
diff --git a/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_service_handler.py b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_service_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..efdac5feab62a6f6d2ac2f3bc552e4ec52dbf1b3
--- /dev/null
+++ b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_service_handler.py
@@ -0,0 +1,467 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Service handler for P4-based In-band Network Telemetry (INT),
+following the P4.org Applications WG INT Dataplane
+Specification v0.5 (2017-12):
+
+https://p4.org/p4-spec/docs/INT_v0_5.pdf
+"""
+
+import logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigActionEnum, DeviceId, Service, Device
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type, chk_address_mac, chk_address_ipv4,\
+    chk_transport_port, chk_vlan_id
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import *
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+
+from .p4_fabric_tna_int_config import *
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_fabric_tna_int'})
+
+class P4FabricINTServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings # type: ignore
+    ) -> None:
+        """ Initialize Driver.
+            Parameters:
+                service
+                    The service instance (gRPC message) to be managed.
+                task_executor
+                    An instance of Task Executor providing access to the
+                    service handlers factory, the context and device clients,
+                    and an internal cache of already-loaded gRPC entities.
+                **settings
+                    Extra settings required by the service handler.
+
+        """
+        self.__service_label = "P4 In-band Network Telemetry (INT) connectivity service"
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(self.__service.service_config, **settings)
+
+        self._init_settings()
+        self._parse_settings()
+        self._print_settings()
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        """ Create/Update service endpoints from a list.
+            Parameters:
+                endpoints: List[Tuple[str, str, Optional[str]]]
+                    List of tuples, each containing a device_uuid,
+                    endpoint_uuid and, optionally, the topology_uuid
+                    of the endpoint to be added.
+                connection_uuid : Optional[str]
+                    If specified, it is the UUID of the connection this endpoint is associated with.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for endpoint changes requested.
+                    Return values must be in the same order as the requested
+                    endpoints. If an endpoint is properly added, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        LOGGER.info("{} - Provision service configuration".format(
+            self.__service_label))
+
+        visited = set()
+        results = []
+        for endpoint in endpoints:
+            device_uuid, _ = endpoint[0:2]
+            device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+
+            # Skip already visited devices
+            if device.name in visited:
+                continue
+            LOGGER.info("Device {} - Setting up In-band Network Telemetry (INT) configuration".format(
+                device.name))
+
+            rules = []
+            actual_rules = -1
+            applied_rules, failed_rules = 0, -1
+
+            # Create and apply rules
+            try:
+                rules = self._create_rules(device_obj=device, action=ConfigActionEnum.CONFIGACTION_SET)
+                actual_rules = len(rules)
+                applied_rules, failed_rules = apply_rules(
+                    task_executor=self.__task_executor,
+                    device_obj=device,
+                    json_config_rules=rules
+                )
+            except Exception as ex:
+                LOGGER.error("Failed to insert INT rules on device {} due to {}".format(device.name, ex))
+            finally:
+                rules.clear()
+
+            # Ensure correct status
+            results.append((failed_rules == 0) and (applied_rules == actual_rules))
+
+            # Mark this device as visited so it is not processed again
+            visited.add(device.name)
+
+            LOGGER.info("Installed {}/{} INT rules on device {}".format(
+                applied_rules, actual_rules, device.name))
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        """ Delete service endpoints from a list.
+            Parameters:
+                endpoints: List[Tuple[str, str, Optional[str]]]
+                    List of tuples, each containing a device_uuid,
+                    endpoint_uuid, and the topology_uuid of the endpoint
+                    to be removed.
+                connection_uuid : Optional[str]
+                    If specified, it is the UUID of the connection this endpoint is associated with.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for endpoint deletions requested.
+                    Return values must be in the same order as the requested
+                    endpoints. If an endpoint is properly deleted, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        LOGGER.info("{} - Deprovision service configuration".format(
+            self.__service_label))
+
+        visited = set()
+        results = []
+        for endpoint in endpoints:
+            device_uuid, _ = endpoint[0:2]
+            device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+
+            # Skip already visited devices
+            if device.name in visited:
+                continue
+            LOGGER.info("Device {} - Removing In-band Network Telemetry (INT) configuration".format(
+                device.name))
+
+            rules = []
+            actual_rules = -1
+            applied_rules, failed_rules = 0, -1
+
+            # Create and apply rules
+            try:
+                rules = self._create_rules(device_obj=device, action=ConfigActionEnum.CONFIGACTION_DELETE)
+                actual_rules = len(rules)
+                applied_rules, failed_rules = apply_rules(
+                    task_executor=self.__task_executor,
+                    device_obj=device,
+                    json_config_rules=rules
+                )
+            except Exception as ex:
+                LOGGER.error("Failed to delete INT rules from device {} due to {}".format(device.name, ex))
+            finally:
+                rules.clear()
+
+            # Ensure correct status
+            results.append((failed_rules == 0) and (applied_rules == actual_rules))
+
+            # Mark this device as visited so it is not processed again
+            visited.add(device.name)
+
+            LOGGER.info("Deleted {}/{} INT rules from device {}".format(
+                applied_rules, actual_rules, device.name))
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConstraint(self, constraints: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update service constraints.
+            Parameters:
+                constraints: List[Tuple[str, Any]]
+                    List of tuples, each containing a constraint_type and the
+                    new constraint_value to be set.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for constraint changes requested.
+                    Return values must be in the same order as the requested
+                    constraints. If a constraint is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete service constraints.
+            Parameters:
+                constraints: List[Tuple[str, Any]]
+                    List of tuples, each containing a constraint_type pointing
+                    to the constraint to be deleted, and a constraint_value
+                    containing possible additionally required values to locate
+                    the constraint to be removed.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for constraint deletions requested.
+                    Return values must be in the same order as the requested
+                    constraints. If a constraint is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing the new value to be set.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value containing
+                    possible additionally required values to locate the value
+                    to be removed.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    def _init_settings(self):
+        self.__switch_info = {}
+        self.__int_collector_info = {}
+        self.__int_collector_mac = ""
+        self.__int_collector_ip = ""
+        self.__int_collector_port = -1
+        self.__int_vlan_id = -1
+
+        try:
+            self.__settings = self.__settings_handler.get('/settings')
+            LOGGER.info("{} with settings: {}".format(self.__service_label, self.__settings))
+        except Exception as ex:
+            self.__settings = {}
+            LOGGER.error("Failed to parse service settings: {}".format(ex))
+
+    def _default_settings(self):
+        switch_info = {
+            "p4-sw1": {
+                ARCH: TARGET_ARCH_V1MODEL,
+                DPID: 1,
+                MAC: "fa:16:3e:93:8c:c0",
+                IP: "10.10.10.120",
+                PORT_INT: {
+                    PORT_ID: 3,
+                    PORT_TYPE: "host"
+                },
+                RECIRCULATION_PORT_LIST: RECIRCULATION_PORTS_V1MODEL,
+                INT_REPORT_MIRROR_ID_LIST: INT_REPORT_MIRROR_ID_LIST_V1MODEL
+            }
+        }
+        int_collector_info = {
+            MAC: "fa:16:3e:fb:cf:96",
+            IP: "10.10.10.41",
+            PORT: 32766,
+            VLAN_ID: 4094
+        }
+        self.__settings = {
+            SWITCH_INFO: switch_info,
+            INT_COLLECTOR_INFO: int_collector_info
+        }
+
+    def _parse_settings(self):
+        #TODO: Pass settings in a correct way
+        try:
+            self.__switch_info = self.__settings[SWITCH_INFO]
+        except Exception as ex:
+            LOGGER.error("Failed to parse settings: {}".format(ex))
+            self._default_settings() #TODO: Remove when bug is fixed
+            self.__switch_info = self.__settings[SWITCH_INFO]
+        assert isinstance(self.__switch_info, dict), "Switch info object must be a map with switch names as keys"
+
+        for switch_name, switch_info in self.__switch_info.items():
+            assert switch_name, "Invalid P4 switch name"
+            assert isinstance(switch_info, dict), "Switch {} info must be a map with arch, dpid, mac, ip, and int_port items".format(switch_name)
+            assert switch_info[ARCH] in SUPPORTED_TARGET_ARCH_LIST, \
+                "Switch {} - Supported P4 architectures are: {}".format(switch_name, ','.join(SUPPORTED_TARGET_ARCH_LIST))
+            assert switch_info[DPID] > 0, "Switch {} - P4 switch dataplane ID must be a positive integer".format(switch_name)
+            assert chk_address_mac(switch_info[MAC]), "Switch {} - Invalid source Ethernet address".format(switch_name)
+            assert chk_address_ipv4(switch_info[IP]), "Switch {} - Invalid source IP address".format(switch_name)
+            assert isinstance(switch_info[PORT_INT], dict), "Switch {} - INT port object must be a map with port_id and port_type items".format(switch_name)
+            assert switch_info[PORT_INT][PORT_ID] >= 0, "Switch {} - Invalid P4 switch port ID".format(switch_name)
+            assert switch_info[PORT_INT][PORT_TYPE] in PORT_TYPES_STR_VALID, "Switch {} - Valid P4 switch port types are: {}".format(
+                switch_name, ','.join(PORT_TYPES_STR_VALID))
+            if arch_tna(switch_info[ARCH]):
+                switch_info[RECIRCULATION_PORT_LIST] = RECIRCULATION_PORTS_TNA
+                switch_info[INT_REPORT_MIRROR_ID_LIST] = INT_REPORT_MIRROR_ID_LIST_TNA
+            else:
+                switch_info[RECIRCULATION_PORT_LIST] = RECIRCULATION_PORTS_V1MODEL
+                switch_info[INT_REPORT_MIRROR_ID_LIST] = INT_REPORT_MIRROR_ID_LIST_V1MODEL
+            assert isinstance(switch_info[RECIRCULATION_PORT_LIST], list), "Switch {} - Recirculation ports must be described as a list".format(switch_name)
+
+        self.__int_collector_info = self.__settings[INT_COLLECTOR_INFO]
+        assert isinstance(self.__int_collector_info, dict), "INT collector info object must be a map with mac, ip, port, and vlan_id keys"
+
+        self.__int_collector_mac = self.__int_collector_info[MAC]
+        assert chk_address_mac(self.__int_collector_mac), "Invalid P4 INT collector MAC address"
+
+        self.__int_collector_ip = self.__int_collector_info[IP]
+        assert chk_address_ipv4(self.__int_collector_ip), "Invalid P4 INT collector IPv4 address"
+
+        self.__int_collector_port = self.__int_collector_info[PORT]
+        assert chk_transport_port(self.__int_collector_port), "Invalid P4 INT collector transport port"
+
+        self.__int_vlan_id = self.__int_collector_info[VLAN_ID]
+        assert chk_vlan_id(self.__int_vlan_id), "Invalid VLAN ID"
+
+    def _print_settings(self):
+        LOGGER.info("-------------------- {} settings --------------------".format(self.__service.name))
+        LOGGER.info("--- Topology info")
+        for switch_name, switch_info in self.__switch_info.items():
+            LOGGER.info("\t Device {}".format(switch_name))
+            LOGGER.info("\t\t|  Target P4 architecture: {}".format(switch_info[ARCH]))
+            LOGGER.info("\t\t|           Data plane ID: {}".format(switch_info[DPID]))
+            LOGGER.info("\t\t|      Source MAC address: {}".format(switch_info[MAC]))
+            LOGGER.info("\t\t|      Source  IP address: {}".format(switch_info[IP]))
+            LOGGER.info("\t\t|           INT port   ID: {}".format(switch_info[PORT_INT][PORT_ID]))
+            LOGGER.info("\t\t|           INT port type: {}".format(switch_info[PORT_INT][PORT_TYPE]))
+            LOGGER.info("\t\t| Recirculation port list: {}".format(switch_info[RECIRCULATION_PORT_LIST]))
+            LOGGER.info("\t\t|   Report mirror ID list: {}".format(switch_info[INT_REPORT_MIRROR_ID_LIST]))
+        LOGGER.info("--- INT collector  MAC: {}".format(self.__int_collector_mac))
+        LOGGER.info("--- INT collector   IP: {}".format(self.__int_collector_ip))
+        LOGGER.info("--- INT collector port: {}".format(self.__int_collector_port))
+        LOGGER.info("--- INT        VLAN ID: {}".format(self.__int_vlan_id))
+        LOGGER.info("-----------------------------------------------------------------")
+
+    def _create_rules(self, device_obj : Device, action : ConfigActionEnum): # type: ignore
+        dev_name = device_obj.name
+        rules  = []
+
+        try:
+            ### INT reporting rules
+            rules += rules_set_up_int_watchlist(action=action)
+            rules += rules_set_up_int_recirculation_ports(
+                recirculation_port_list=self.__switch_info[dev_name][RECIRCULATION_PORT_LIST],
+                port_type=PORT_TYPE_INT,
+                fwd_type=FORWARDING_TYPE_UNICAST_IPV4,
+                vlan_id=self.__int_vlan_id,
+                action=action
+            )
+            rules += rules_set_up_int_report_collector(
+                int_collector_ip=self.__int_collector_ip,
+                action=action
+            )
+            rules += rules_set_up_int_report_flow(
+                switch_id=self.__switch_info[dev_name][DPID],
+                src_ip=self.__switch_info[dev_name][IP],
+                int_collector_ip=self.__int_collector_ip,
+                int_collector_port=self.__int_collector_port,
+                action=action
+            )
+            rules += rules_set_up_report_mirror_flow(
+                recirculation_port_list=self.__switch_info[dev_name][RECIRCULATION_PORT_LIST],
+                report_mirror_id_list=self.__switch_info[dev_name][INT_REPORT_MIRROR_ID_LIST],
+                action=action
+            )
+            ### INT port setup rules
+            rules += rules_set_up_port(
+                port=self.__switch_info[dev_name][PORT_INT][PORT_ID],
+                port_type=PORT_TYPE_HOST,
+                fwd_type=FORWARDING_TYPE_BRIDGING,
+                vlan_id=self.__int_vlan_id,
+                action=action
+            )
+            ### INT port forwarding rules
+            rules += rules_set_up_fwd_bridging(
+                vlan_id=self.__int_vlan_id,
+                eth_dst=self.__int_collector_mac,
+                egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID],
+                action=action
+            )
+            rules += rules_set_up_next_output_simple(
+                egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID],
+                action=action
+            )
+            ### INT packet routing rules
+            rules += rules_set_up_next_routing_simple(
+                egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID],
+                eth_src=self.__switch_info[dev_name][MAC],
+                eth_dst=self.__int_collector_mac,
+                action=action
+            )
+            rules += rules_set_up_routing(
+                ipv4_dst=self.__int_collector_ip,
+                ipv4_prefix_len=32,
+                egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID],
+                action=action
+            )
+        except Exception as ex:
+            LOGGER.error("Error while creating rules")
+            raise Exception(ex)
+
+        return rules
diff --git a/src/tests/p4-fabric-tna/README.md b/src/tests/p4-fabric-tna/README.md
index fc49276a990694f3de9cbaef1c7d4272e92d6004..10303009dbf533df4efd451b03f6359d38df203c 100644
--- a/src/tests/p4-fabric-tna/README.md
+++ b/src/tests/p4-fabric-tna/README.md
@@ -120,6 +120,25 @@ cd ~/tfs-ctrl/
 bash src/tests/p4-fabric-tna/run_test_02b_sbi_deprovision_int_l2_l3_acl.sh
 ```
 
+### Step 3: Manage L2, L3, ACL, and INT via the Service API
+
+To avoid interacting with the switch through low-level P4 rules (via the SBI), we provide modular network services that allow users to easily provision L2, L3, ACL, and INT network functions.
+These services only require users to define the service endpoints and some high-level service configuration, leaving the rest of the complexity to tailored service handlers that interact with the SBI on behalf of the user.
+
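+The INT service handler is selected for services of type `SERVICETYPE_INT` on devices managed by the P4 driver. The high-level INT configuration travels in a `/settings` config rule of the service descriptor; the excerpt below is taken from `src/tests/p4-fabric-tna/descriptors/service-create-int.json`, which is used by the test scripts that follow:
+
+```json
+"switch_info": {
+    "p4-sw1": {
+        "arch": "v1model",
+        "dpid": 1,
+        "mac": "fa:16:3e:93:8c:c0",
+        "ip": "10.10.10.120",
+        "int_port": {"port_id": 3, "port_type": "host"}
+    }
+},
+"int_collector_info": {
+    "mac": "fa:16:3e:fb:cf:96",
+    "ip": "10.10.10.41",
+    "port": 32766,
+    "vlan_id": 4094
+}
+```
+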
+#### Provision INT service via the Service API
+
+```shell
+cd ~/tfs-ctrl/
+bash src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh
+```
+
+#### Deprovision INT service via the Service API
+
+```shell
+cd ~/tfs-ctrl/
+bash src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh
+```
+
 ### Step 4: Deprovision topology
 
 Delete all the objects (context, topology, links, devices) from TFS:
diff --git a/src/tests/p4-fabric-tna/descriptors/service-create-int.json b/src/tests/p4-fabric-tna/descriptors/service-create-int.json
new file mode 100644
index 0000000000000000000000000000000000000000..98956b9595ac459ba93d46948f8ec46d78db68a9
--- /dev/null
+++ b/src/tests/p4-fabric-tna/descriptors/service-create-int.json
@@ -0,0 +1,53 @@
+{
+    "services": [
+        {
+            "service_id": {
+                "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "p4-service-int"}
+            },
+            "name": "p4-service-int",
+            "service_type": "SERVICETYPE_INT",
+            "service_status": {"service_status": "SERVICESTATUS_PLANNED"},
+            "service_endpoint_ids": [
+                {
+                    "device_id": {"device_uuid": {"uuid": "p4-sw1"}},
+                    "endpoint_uuid": {"uuid": "1"}
+                },
+                {
+                    "device_id": {"device_uuid": {"uuid": "p4-sw1"}},
+                    "endpoint_uuid": {"uuid": "2"}
+                }
+            ],
+            "service_config": {
+                "config_rules": [
+                    {
+                        "action": "CONFIGACTION_SET",
+                        "custom": {
+                            "resource_key": "/settings",
+                            "resource_value": {
+                                "switch_info": {
+                                    "p4-sw1": {
+                                        "arch": "v1model",
+                                        "dpid": 1,
+                                        "mac": "fa:16:3e:93:8c:c0",
+                                        "ip": "10.10.10.120",
+                                        "int_port": {
+                                            "port_id": 3,
+                                            "port_type": "host"
+                                        }
+                                    }
+                                },
+                                "int_collector_info": {
+                                    "mac": "fa:16:3e:fb:cf:96",
+                                    "ip": "10.10.10.41",
+                                    "port": 32766,
+                                    "vlan_id": 4094
+                                }
+                            }
+                        }
+                    }
+                ]
+            },
+            "service_constraints": []
+        }
+    ]
+}
diff --git a/src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh b/src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh
new file mode 100755
index 0000000000000000000000000000000000000000..12bc82352c83b2ffa0e32cf196b9d4bee951bee4
--- /dev/null
+++ b/src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py
diff --git a/src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh b/src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a501de77089d2200d5170ba85b980ee220d58e4f
--- /dev/null
+++ b/src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars.sh
+python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py
diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py
new file mode 100644
index 0000000000000000000000000000000000000000..f29f6b17c92bc851e9ee2dae03fb2e040aba409f
--- /dev/null
+++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py
@@ -0,0 +1,78 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Service import json_service_id
+from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
+from tests.Fixtures import context_client, service_client # pylint: disable=unused-import
+from tests.tools.test_tools_p4 import *
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def test_service_deletion_int(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    service_client : ServiceClient  # pylint: disable=redefined-outer-name
+) -> None:
+    # Get the current number of devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response)))
+
+    # Total devices
+    dev_nb = len(response.devices)
+    assert dev_nb == DEV_NB
+
+    # P4 devices
+    p4_dev_nb = identify_number_of_p4_devices(response.devices)
+    assert p4_dev_nb == P4_DEV_NB
+
+    # Get the current number of rules in the P4 devices
+    p4_rules_before_deletion = get_number_of_rules(response.devices)
+
+    # Get the current number of services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_before_deletion = len(response.services)
+    assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_INT)
+
+    for service in response.services:
+        # Ignore services of other types
+        if service.service_type != ServiceTypeEnum.SERVICETYPE_INT:
+            continue
+
+        service_id = service.service_id
+        assert service_id
+
+        service_uuid = service_id.service_uuid.uuid
+        context_uuid = service_id.context_id.context_uuid.uuid
+        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+
+        # Delete INT service
+        service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))))
+
+    # Get an updated view of the services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_after_deletion = len(response.services)
+    assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one service must be deleted"
+
+    # Get an updated view of the devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    p4_rules_after_deletion = get_number_of_rules(response.devices)
+
+    rules_diff = p4_rules_before_deletion - p4_rules_after_deletion
+
+    assert p4_rules_after_deletion < p4_rules_before_deletion, "INT service deletion must remove some rules"
+    assert rules_diff == P4_DEV_NB * INT_RULES, "INT service deletion must remove {} rules per device".format(INT_RULES)
diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a875c66abae8139e4e7bc29451dab01490f4599
--- /dev/null
+++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py
@@ -0,0 +1,73 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import ServiceTypeEnum
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import
+from tests.tools.test_tools_p4 import *
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def test_service_creation_int(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client  : DeviceClient,  # pylint: disable=redefined-outer-name
+    service_client : ServiceClient  # pylint: disable=redefined-outer-name
+) -> None:
+    # Get the current number of services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_before = len(response.services)
+
+    # Get the current number of devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response)))
+
+    # Total devices
+    dev_nb = len(response.devices)
+    assert dev_nb == DEV_NB
+
+    # P4 devices
+    p4_dev_nb = identify_number_of_p4_devices(response.devices)
+    assert p4_dev_nb == P4_DEV_NB
+
+    # Get the current number of rules in the P4 devices
+    p4_rules_before = get_number_of_rules(response.devices)
+
+    # Load service
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESC_FILE_SERVICE_CREATE_INT,
+        context_client=context_client, device_client=device_client, service_client=service_client
+    )
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+
+    # Get an updated view of the services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_after = len(response.services)
+    assert services_nb_after == services_nb_before + 1, "Exactly one new service must be in place"
+    assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_INT)
+
+    # Get an updated view of the devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    p4_rules_after = get_number_of_rules(response.devices)
+
+    rules_diff = p4_rules_after - p4_rules_before
+
+    assert p4_rules_after > p4_rules_before, "INT service must install some rules"
+    assert rules_diff == P4_DEV_NB * INT_RULES, "INT service must install {} rules per device".format(INT_RULES)