diff --git a/hackfest3 b/hackfest3
new file mode 120000
index 0000000000000000000000000000000000000000..2816e4af9a9b4f4b06651710e87d93f4d5db1f0b
--- /dev/null
+++ b/hackfest3
@@ -0,0 +1 @@
+src/tests/hackfest3/
\ No newline at end of file
diff --git a/my_deploy.sh b/my_deploy.sh
index ab1d2695241b2362c00e3fff0ff4363ca82401ad..d3f2b5566ec47e58bad906ffa465d22842de2776 100755
--- a/my_deploy.sh
+++ b/my_deploy.sh
@@ -59,7 +59,7 @@ export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yam
 export TFS_GRAFANA_PASSWORD="admin123+"
 
 # Disable skip-build flag to rebuild the Docker images.
-export TFS_SKIP_BUILD="YES"
+export TFS_SKIP_BUILD=""
 
 
 # ----- CockroachDB ------------------------------------------------------------
diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4/p4_service_handler.py
index 558f6a590620ec96e4dd3db88599acd037041268..41cfcc5952601a16a13cd691f2e424017936aaa3 100644
--- a/src/service/service/service_handlers/p4/p4_service_handler.py
+++ b/src/service/service/service_handlers/p4/p4_service_handler.py
@@ -71,48 +71,6 @@ def create_rule_del(endpoint_a, endpoint_b):
             ]
         }
     )
-    
-def create_int_set(endpoint_a, id):
-    return json_config_rule_set(
-        'table',
-        {
-            'table-name': 'EgressPipeImpl.int_table',
-       	    'match-fields': [
-                {
-                    'match-field': 'standard_metadata.ingress_port',
-                    'match-value': endpoint_a
-                }
-            ],
-            'action-name': 'EgressPipeImpl.add_int_header',
-            'action-params': [
-                {
-                    'action-param': 'swid',
-                    'action-value': id
-                }
-            ]
-        }
-    )
-    
-def create_int_del(endpoint_a, id):
-    return json_config_rule_delete(
-        'table',
-        {
-            'table-name': 'EgressPipeImpl.int_table',
-       	    'match-fields': [
-                {
-                    'match-field': 'standard_metadata.ingress_port',
-                    'match-value': endpoint_a
-                }
-            ],
-            'action-name': 'EgressPipeImpl.add_int_header',
-            'action-params': [
-                {
-                    'action-param': 'swid',
-                    'action-value': id
-                }
-            ]
-        }
-    )
 
 def find_names(uuid_a, uuid_b, device_endpoints):
     endpoint_a, endpoint_b = None, None
@@ -198,9 +156,6 @@ class P4ServiceHandler(_ServiceHandler):
                     # The other way
                     rule = create_rule_set(endpoint_b, endpoint_a) 
                     device.device_config.config_rules.append(ConfigRule(**rule))
-                    
-                    rule = create_int_set(endpoint_a, device.name[-1])
-                    device.device_config.config_rules.append(ConfigRule(**rule))
 
                     self.__task_executor.configure_device(device)
             
@@ -273,9 +228,6 @@ class P4ServiceHandler(_ServiceHandler):
                     rule = create_rule_del(endpoint_b, endpoint_a) 
                     device.device_config.config_rules.append(ConfigRule(**rule))
 
-                    rule = create_int_del(endpoint_a, device.name[-1])
-                    device.device_config.config_rules.append(ConfigRule(**rule))
-
                     self.__task_executor.configure_device(device)
             
                     results.append(True)
diff --git a/src/tests/hackfest3/new-probe/install-scapy.sh b/src/tests/hackfest3/int/install-scapy.sh
similarity index 100%
rename from src/tests/hackfest3/new-probe/install-scapy.sh
rename to src/tests/hackfest3/int/install-scapy.sh
diff --git a/src/tests/hackfest3/int/qdepth_int_basic.p4 b/src/tests/hackfest3/int/qdepth_int_basic.p4
new file mode 100644
index 0000000000000000000000000000000000000000..6bef091b96f4a4b59b50a3d97224e003abe2acf0
--- /dev/null
+++ b/src/tests/hackfest3/int/qdepth_int_basic.p4
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <core.p4>
+#include <v1model.p4>
+
+typedef bit<9>   port_num_t;
+typedef bit<48>  mac_addr_t;
+
+//------------------------------------------------------------------------------
+// HEADER DEFINITIONS
+//------------------------------------------------------------------------------
+
+#define MAX_INT_HEADERS 9
+
+const bit<16> TYPE_IPV4 = 0x800;
+const bit<5>  IPV4_OPTION_INT = 31;
+
+typedef bit<9>  egressSpec_t;
+typedef bit<48> macAddr_t;
+typedef bit<32> ip4Addr_t;
+
+typedef bit<13> switch_id_t;
+typedef bit<13> queue_depth_t;
+typedef bit<6>  output_port_t;
+
+header ethernet_t {
+    macAddr_t dstAddr;
+    macAddr_t srcAddr;
+    bit<16>   etherType;
+}
+
+header ipv4_t {
+    bit<4>    version;
+    bit<4>    ihl;
+    bit<6>    dscp;
+    bit<2>    ecn;
+    bit<16>   totalLen;
+    bit<16>   identification;
+    bit<3>    flags;
+    bit<13>   fragOffset;
+    bit<8>    ttl;
+    bit<8>    protocol;
+    bit<16>   hdrChecksum;
+    ip4Addr_t srcAddr;
+    ip4Addr_t dstAddr;
+}
+
+header ipv4_option_t {
+    bit<1> copyFlag;
+    bit<2> optClass;
+    bit<5> option;
+    bit<8> optionLength;
+}
+
+header int_count_t {
+    bit<16>   num_switches;
+}
+
+header int_header_t {
+    switch_id_t switch_id;
+    queue_depth_t queue_depth;
+    output_port_t output_port;
+}
+
+
+struct parser_metadata_t {
+    bit<16> num_headers_remaining;
+}
+
+struct local_metadata_t {
+    parser_metadata_t  parser_metadata;
+}
+
+struct parsed_headers_t {
+    ethernet_t   ethernet;
+    ipv4_t       ipv4;
+    ipv4_option_t ipv4_option;
+    int_count_t   int_count;
+    int_header_t[MAX_INT_HEADERS] int_headers;
+}
+
+error { IPHeaderWithoutOptions }
+
+//------------------------------------------------------------------------------
+// INGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+parser ParserImpl(packet_in packet,
+                out parsed_headers_t hdr,
+                inout local_metadata_t local_metadata,
+                inout standard_metadata_t standard_metadata) {
+
+    state start {
+
+        packet.extract(hdr.ethernet);
+        transition select(hdr.ethernet.etherType){
+            TYPE_IPV4: parse_ipv4;
+            default: accept;
+        }
+    }
+
+    state parse_ipv4 {
+        packet.extract(hdr.ipv4);
+        //Check if ihl is bigger than 5. Packets without ip options set ihl to 5.
+        verify(hdr.ipv4.ihl >= 5, error.IPHeaderWithoutOptions);
+        transition select(hdr.ipv4.ihl) {
+            5             : accept;
+            default       : parse_ipv4_option;
+        }
+    }
+
+    state parse_ipv4_option {
+        packet.extract(hdr.ipv4_option);
+        transition select(hdr.ipv4_option.option){
+
+            IPV4_OPTION_INT:  parse_int;
+            default: accept;
+
+        }
+     }
+
+    state parse_int {
+        packet.extract(hdr.int_count);
+        local_metadata.parser_metadata.num_headers_remaining = hdr.int_count.num_switches;
+        transition select(local_metadata.parser_metadata.num_headers_remaining){
+            0: accept;
+            default: parse_int_headers;
+        }
+    }
+
+    state parse_int_headers {
+        packet.extract(hdr.int_headers.next);
+        local_metadata.parser_metadata.num_headers_remaining = local_metadata.parser_metadata.num_headers_remaining -1 ;
+        transition select(local_metadata.parser_metadata.num_headers_remaining){
+            0: accept;
+            default: parse_int_headers;
+        }
+    }
+}
+
+control VerifyChecksumImpl(inout parsed_headers_t hdr,
+                           inout local_metadata_t meta)
+{
+    apply { /* EMPTY */ }
+}
+
+
+control IngressPipeImpl (inout parsed_headers_t    hdr,
+                         inout local_metadata_t    local_metadata,
+                         inout standard_metadata_t standard_metadata) {
+
+    action drop() {
+        mark_to_drop(standard_metadata);
+    }
+
+    action set_egress_port(port_num_t port) {
+        standard_metadata.egress_spec = port;
+    }
+
+    // --- l2_exact_table ------------------
+
+    table l2_exact_table {
+        key = {
+            standard_metadata.ingress_port: exact;
+        }
+        actions = {
+            set_egress_port;
+            @defaultonly drop;
+        }
+        const default_action = drop;
+    }
+
+    apply {
+        l2_exact_table.apply();
+    }
+}
+
+//------------------------------------------------------------------------------
+// EGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+control EgressPipeImpl (inout parsed_headers_t hdr,
+                        inout local_metadata_t local_metadata,
+                        inout standard_metadata_t standard_metadata) {
+
+    
+    action add_int_header(switch_id_t swid){
+        //increase int stack counter by one
+        hdr.int_count.num_switches = hdr.int_count.num_switches + 1;
+        hdr.int_headers.push_front(1);
+        // This was not needed in older specs. Now by default pushed
+        // invalid elements are
+        hdr.int_headers[0].setValid();
+        hdr.int_headers[0].switch_id = (bit<13>)swid;
+        hdr.int_headers[0].queue_depth = (bit<13>)standard_metadata.deq_qdepth;
+        hdr.int_headers[0].output_port = (bit<6>)standard_metadata.egress_port;
+
+        //update ip header length
+        hdr.ipv4.ihl = hdr.ipv4.ihl + 1;
+        hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4;
+        hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4;
+    }
+
+    table int_table {
+        actions = {
+            add_int_header;
+            NoAction;
+        }
+        default_action = add_int_header(1);
+    }
+
+    apply {
+        if (hdr.int_count.isValid()){
+            int_table.apply();
+        }
+    }
+}
+
+
+control ComputeChecksumImpl(inout parsed_headers_t hdr,
+                            inout local_metadata_t local_metadata)
+{
+    apply {
+        update_checksum(
+	          hdr.ipv4.isValid(),
+            { hdr.ipv4.version,
+	            hdr.ipv4.ihl,
+              hdr.ipv4.dscp,
+              hdr.ipv4.ecn,
+              hdr.ipv4.totalLen,
+              hdr.ipv4.identification,
+              hdr.ipv4.flags,
+              hdr.ipv4.fragOffset,
+              hdr.ipv4.ttl,
+              hdr.ipv4.protocol,
+              hdr.ipv4.srcAddr,
+              hdr.ipv4.dstAddr },
+            hdr.ipv4.hdrChecksum,
+            HashAlgorithm.csum16);
+    }
+}
+
+control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
+    apply {
+
+        //parsed headers have to be added again into the packet.
+        packet.emit(hdr.ethernet);
+        packet.emit(hdr.ipv4);
+        packet.emit(hdr.ipv4_option);
+        packet.emit(hdr.int_count);
+        packet.emit(hdr.int_headers);
+
+    }
+}
+
+V1Switch(
+    ParserImpl(),
+    VerifyChecksumImpl(),
+    IngressPipeImpl(),
+    EgressPipeImpl(),
+    ComputeChecksumImpl(),
+    DeparserImpl()
+) main;
diff --git a/src/tests/hackfest3/new-probe/receive.py b/src/tests/hackfest3/int/receive.py
similarity index 100%
rename from src/tests/hackfest3/new-probe/receive.py
rename to src/tests/hackfest3/int/receive.py
diff --git a/src/tests/hackfest3/new-probe/send.py b/src/tests/hackfest3/int/send.py
similarity index 100%
rename from src/tests/hackfest3/new-probe/send.py
rename to src/tests/hackfest3/int/send.py
diff --git a/src/tests/hackfest3/int/solution/p4_service_handler.py b/src/tests/hackfest3/int/solution/p4_service_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..558f6a590620ec96e4dd3db88599acd037041268
--- /dev/null
+++ b/src/tests/hackfest3/int/solution/p4_service_handler.py
@@ -0,0 +1,389 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+P4 service handler for the TeraFlowSDN controller.
+"""
+
+import logging
+from typing import Any, List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, DeviceId, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4'})
+
+def create_rule_set(endpoint_a, endpoint_b):
+    return json_config_rule_set(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_exact_table',
+            'match-fields': [
+                {
+                    'match-field': 'standard_metadata.ingress_port',
+                    'match-value': endpoint_a
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port',
+                    'action-value': endpoint_b
+                }
+            ]
+        }
+    )
+
+def create_rule_del(endpoint_a, endpoint_b):
+    return json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'IngressPipeImpl.l2_exact_table',
+            'match-fields': [
+                {
+                    'match-field': 'standard_metadata.ingress_port',
+                    'match-value': endpoint_a
+                }
+            ],
+            'action-name': 'IngressPipeImpl.set_egress_port',
+            'action-params': [
+                {
+                    'action-param': 'port',
+                    'action-value': endpoint_b
+                }
+            ]
+        }
+    )
+    
+def create_int_set(endpoint_a, id):
+    return json_config_rule_set(
+        'table',
+        {
+            'table-name': 'EgressPipeImpl.int_table',
+       	    'match-fields': [
+                {
+                    'match-field': 'standard_metadata.ingress_port',
+                    'match-value': endpoint_a
+                }
+            ],
+            'action-name': 'EgressPipeImpl.add_int_header',
+            'action-params': [
+                {
+                    'action-param': 'swid',
+                    'action-value': id
+                }
+            ]
+        }
+    )
+    
+def create_int_del(endpoint_a, id):
+    return json_config_rule_delete(
+        'table',
+        {
+            'table-name': 'EgressPipeImpl.int_table',
+       	    'match-fields': [
+                {
+                    'match-field': 'standard_metadata.ingress_port',
+                    'match-value': endpoint_a
+                }
+            ],
+            'action-name': 'EgressPipeImpl.add_int_header',
+            'action-params': [
+                {
+                    'action-param': 'swid',
+                    'action-value': id
+                }
+            ]
+        }
+    )
+
+def find_names(uuid_a, uuid_b, device_endpoints):
+    endpoint_a, endpoint_b = None, None
+    for endpoint in device_endpoints:
+        if endpoint.endpoint_id.endpoint_uuid.uuid == uuid_a:
+            endpoint_a = endpoint.name
+        elif endpoint.endpoint_id.endpoint_uuid.uuid == uuid_b:
+            endpoint_b = endpoint.name
+            
+    return (endpoint_a, endpoint_b)
+
+class P4ServiceHandler(_ServiceHandler):
+    def __init__(self,
+                 service: Service,
+                 task_executor : TaskExecutor,
+                 **settings) -> None:
+        """ Initialize Driver.
+            Parameters:
+                service
+                    The service instance (gRPC message) to be managed.
+                task_executor
+                    An instance of Task Executor providing access to the
+                    service handlers factory, the context and device clients,
+                    and an internal cache of already-loaded gRPC entities.
+                **settings
+                    Extra settings required by the service handler.
+        """
+        self.__service = service
+        self.__task_executor = task_executor # pylint: disable=unused-private-member
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]],
+        connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        """ Create/Update service endpoints form a list.
+            Parameters:
+                endpoints: List[Tuple[str, str, Optional[str]]]
+                    List of tuples, each containing a device_uuid,
+                    endpoint_uuid and, optionally, the topology_uuid
+                    of the endpoint to be added.
+                connection_uuid : Optional[str]
+                    If specified, is the UUID of the connection this endpoint is associated to.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for endpoint changes requested.
+                    Return values must be in the same order as the requested
+                    endpoints. If an endpoint is properly added, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+
+        history = {}
+        
+        results = []
+        index = {}
+        i = 0
+        for endpoint in endpoints:        
+            device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+            if device_uuid in history:       
+                try:
+                    matched_endpoint_uuid = history.pop(device_uuid)
+                    device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+
+                    del device.device_config.config_rules[:]
+                    
+                    # Find names from uuids
+                    (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints)
+                    if endpoint_a is None:
+                        LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+                        raise Exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+                    if endpoint_b is None:
+                        LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+                        raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+
+                    # One way
+                    rule = create_rule_set(endpoint_a, endpoint_b) 
+                    device.device_config.config_rules.append(ConfigRule(**rule))
+                    # The other way
+                    rule = create_rule_set(endpoint_b, endpoint_a) 
+                    device.device_config.config_rules.append(ConfigRule(**rule))
+                    
+                    rule = create_int_set(endpoint_a, device.name[-1])
+                    device.device_config.config_rules.append(ConfigRule(**rule))
+
+                    self.__task_executor.configure_device(device)
+            
+                    results.append(True)
+                    results[index[device_uuid]] = True
+                except Exception as e:
+                    LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint)))
+                    results.append(e)
+            else:
+                history[device_uuid] = endpoint_uuid
+                index[device_uuid] = i
+                results.append(False)
+            i = i+1
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]],
+        connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        """ Delete service endpoints form a list.
+            Parameters:
+                endpoints: List[Tuple[str, str, Optional[str]]]
+                    List of tuples, each containing a device_uuid,
+                    endpoint_uuid, and the topology_uuid of the endpoint
+                    to be removed.
+                connection_uuid : Optional[str]
+                    If specified, is the UUID of the connection this endpoint is associated to.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for endpoint deletions requested.
+                    Return values must be in the same order as the requested
+                    endpoints. If an endpoint is properly deleted, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) == 0: return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+
+        history = {}
+        
+        results = []
+        index = {}
+        i = 0
+        for endpoint in endpoints:        
+            device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now
+            if device_uuid in history:       
+                try:
+                    matched_endpoint_uuid = history.pop(device_uuid)
+                    device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+
+                    del device.device_config.config_rules[:]
+
+                    # Find names from uuids
+                    (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints)
+                    if endpoint_a is None:
+                        LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+                        raise Exception('Unable to find name of endpoint({:s})'.format(str(matched_endpoint_uuid)))
+                    if endpoint_b is None:
+                        LOGGER.exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+                        raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid)))
+
+                    # One way
+                    rule = create_rule_del(endpoint_a, endpoint_b) 
+                    device.device_config.config_rules.append(ConfigRule(**rule))
+                    # The other way
+                    rule = create_rule_del(endpoint_b, endpoint_a) 
+                    device.device_config.config_rules.append(ConfigRule(**rule))
+
+                    rule = create_int_del(endpoint_a, device.name[-1])
+                    device.device_config.config_rules.append(ConfigRule(**rule))
+
+                    self.__task_executor.configure_device(device)
+            
+                    results.append(True)
+                    results[index[device_uuid]] = True
+                except Exception as e:
+                    LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint)))
+                    results.append(e)
+            else:
+                history[device_uuid] = endpoint_uuid
+                index[device_uuid] = i
+                results.append(False)
+            i = i+1
+
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConstraint(self, constraints: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update service constraints.
+            Parameters:
+                constraints: List[Tuple[str, Any]]
+                    List of tuples, each containing a constraint_type and the
+                    new constraint_value to be set.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for constraint changes requested.
+                    Return values must be in the same order as the requested
+                    constraints. If a constraint is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete service constraints.
+            Parameters:
+                constraints: List[Tuple[str, Any]]
+                    List of tuples, each containing a constraint_type pointing
+                    to the constraint to be deleted, and a constraint_value
+                    containing possible additionally required values to locate
+                    the constraint to be removed.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for constraint deletions requested.
+                    Return values must be in the same order as the requested
+                    constraints. If a constraint is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing the new value to be set.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value containing
+                    possible additionally required values to locate the value
+                    to be removed.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
\ No newline at end of file
diff --git a/src/tests/hackfest3/p4/qdepth.main.p4 b/src/tests/hackfest3/int/solution/qdepth_int_basic.p4
similarity index 100%
rename from src/tests/hackfest3/p4/qdepth.main.p4
rename to src/tests/hackfest3/int/solution/qdepth_int_basic.p4
diff --git a/src/tests/hackfest3/new-probe/receive2.py b/src/tests/hackfest3/int/solution/timestamp/receive2.py
similarity index 100%
rename from src/tests/hackfest3/new-probe/receive2.py
rename to src/tests/hackfest3/int/solution/timestamp/receive2.py
diff --git a/src/tests/hackfest3/int/solution/timestamp/timestamp_int.p4 b/src/tests/hackfest3/int/solution/timestamp/timestamp_int.p4
new file mode 100644
index 0000000000000000000000000000000000000000..5a70ad3401d3e74afddad491e8560f76ae18af0f
--- /dev/null
+++ b/src/tests/hackfest3/int/solution/timestamp/timestamp_int.p4
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <core.p4>
+#include <v1model.p4>
+
+typedef bit<9>   port_num_t;
+typedef bit<48>  mac_addr_t;
+
+//------------------------------------------------------------------------------
+// HEADER DEFINITIONS
+//------------------------------------------------------------------------------
+
+#define MAX_INT_HEADERS 9
+
+const bit<16> TYPE_IPV4 = 0x800;
+const bit<5>  IPV4_OPTION_INT = 31;
+
+typedef bit<9>  egressSpec_t;
+typedef bit<48> macAddr_t;
+typedef bit<32> ip4Addr_t;
+
+typedef bit<13> switch_id_t;
+typedef bit<32> queue_depth_t;
+
+header ethernet_t {
+    macAddr_t dstAddr;
+    macAddr_t srcAddr;
+    bit<16>   etherType;
+}
+
+header ipv4_t {
+    bit<4>    version;
+    bit<4>    ihl;
+    bit<6>    dscp;
+    bit<2>    ecn;
+    bit<16>   totalLen;
+    bit<16>   identification;
+    bit<3>    flags;
+    bit<13>   fragOffset;
+    bit<8>    ttl;
+    bit<8>    protocol;
+    bit<16>   hdrChecksum;
+    ip4Addr_t srcAddr;
+    ip4Addr_t dstAddr;
+}
+
+header ipv4_option_t {
+    bit<1> copyFlag;
+    bit<2> optClass;
+    bit<5> option;
+    bit<8> optionLength;
+}
+
+header int_count_t {
+    bit<16>   num_switches;
+}
+
+header int_header_t {
+    queue_depth_t timestamp;
+}
+
+
+struct parser_metadata_t {
+    bit<16> num_headers_remaining;
+}
+
+struct local_metadata_t {
+    parser_metadata_t  parser_metadata;
+}
+
+struct parsed_headers_t {
+    ethernet_t   ethernet;
+    ipv4_t       ipv4;
+    ipv4_option_t ipv4_option;
+    int_count_t   int_count;
+    int_header_t[MAX_INT_HEADERS] int_headers;
+}
+
+error { IPHeaderWithoutOptions }
+
+//------------------------------------------------------------------------------
+// INGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+parser ParserImpl(packet_in packet,
+                out parsed_headers_t hdr,
+                inout local_metadata_t local_metadata,
+                inout standard_metadata_t standard_metadata) {
+
+    state start {
+
+        packet.extract(hdr.ethernet);
+        transition select(hdr.ethernet.etherType){
+            TYPE_IPV4: parse_ipv4;
+            default: accept;
+        }
+    }
+
+    state parse_ipv4 {
+        packet.extract(hdr.ipv4);
+        //Verify that ihl is at least 5. Packets without ip options set ihl to 5.
+        verify(hdr.ipv4.ihl >= 5, error.IPHeaderWithoutOptions);
+        transition select(hdr.ipv4.ihl) {
+            5             : accept;
+            default       : parse_ipv4_option;
+        }
+    }
+
+    state parse_ipv4_option {
+        packet.extract(hdr.ipv4_option);
+        transition select(hdr.ipv4_option.option){
+
+            IPV4_OPTION_INT:  parse_int;
+            default: accept;
+
+        }
+     }
+
+    state parse_int {
+        packet.extract(hdr.int_count);
+        local_metadata.parser_metadata.num_headers_remaining = hdr.int_count.num_switches;
+        transition select(local_metadata.parser_metadata.num_headers_remaining){
+            0: accept;
+            default: parse_int_headers;
+        }
+    }
+
+    state parse_int_headers {
+        packet.extract(hdr.int_headers.next);
+        local_metadata.parser_metadata.num_headers_remaining = local_metadata.parser_metadata.num_headers_remaining -1 ;
+        transition select(local_metadata.parser_metadata.num_headers_remaining){
+            0: accept;
+            default: parse_int_headers;
+        }
+    }
+}
+
+control VerifyChecksumImpl(inout parsed_headers_t hdr,
+                           inout local_metadata_t meta)
+{
+    apply { /* EMPTY */ }
+}
+
+
+control IngressPipeImpl (inout parsed_headers_t    hdr,
+                         inout local_metadata_t    local_metadata,
+                         inout standard_metadata_t standard_metadata) {
+
+    action drop() {
+        mark_to_drop(standard_metadata);
+    }
+
+    action set_egress_port(port_num_t port) {
+        standard_metadata.egress_spec = port;
+    }
+
+    // --- l2_exact_table ------------------
+
+    table l2_exact_table {
+        key = {
+            standard_metadata.ingress_port: exact;
+        }
+        actions = {
+            set_egress_port;
+            @defaultonly drop;
+        }
+        const default_action = drop;
+    }
+
+    apply {
+        l2_exact_table.apply();
+    }
+}
+
+//------------------------------------------------------------------------------
+// EGRESS PIPELINE
+//------------------------------------------------------------------------------
+
+control EgressPipeImpl (inout parsed_headers_t hdr,
+                        inout local_metadata_t local_metadata,
+                        inout standard_metadata_t standard_metadata) {
+
+    
+    action add_int_header(switch_id_t swid){
+        //increase int stack counter by one
+        hdr.int_count.num_switches = hdr.int_count.num_switches + 1;
+        hdr.int_headers.push_front(1);
+        // This was not needed in older specs: elements pushed by
+        // push_front are now invalid by default, so mark it valid.
+        hdr.int_headers[0].setValid();
+        hdr.int_headers[0].timestamp = (bit<32>)standard_metadata.ingress_global_timestamp;
+
+        //update ip header length
+        hdr.ipv4.ihl = hdr.ipv4.ihl + 1;
+        hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4;
+        hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4;
+    }
+
+    table int_table {
+        key = {
+            standard_metadata.ingress_port: exact;
+        }
+        actions = {
+            add_int_header;
+            NoAction;
+        }
+        default_action = NoAction;
+    }
+
+    apply {
+        if (hdr.int_count.isValid()){
+            int_table.apply();
+        }
+    }
+}
+
+
+control ComputeChecksumImpl(inout parsed_headers_t hdr,
+                            inout local_metadata_t local_metadata)
+{
+    apply {
+        update_checksum(
+	          hdr.ipv4.isValid(),
+            { hdr.ipv4.version,
+	            hdr.ipv4.ihl,
+              hdr.ipv4.dscp,
+              hdr.ipv4.ecn,
+              hdr.ipv4.totalLen,
+              hdr.ipv4.identification,
+              hdr.ipv4.flags,
+              hdr.ipv4.fragOffset,
+              hdr.ipv4.ttl,
+              hdr.ipv4.protocol,
+              hdr.ipv4.srcAddr,
+              hdr.ipv4.dstAddr },
+            hdr.ipv4.hdrChecksum,
+            HashAlgorithm.csum16);
+    }
+}
+
+control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
+    apply {
+
+        //parsed headers have to be added again into the packet.
+        packet.emit(hdr.ethernet);
+        packet.emit(hdr.ipv4);
+        packet.emit(hdr.ipv4_option);
+        packet.emit(hdr.int_count);
+        packet.emit(hdr.int_headers);
+
+    }
+}
+
+V1Switch(
+    ParserImpl(),
+    VerifyChecksumImpl(),
+    IngressPipeImpl(),
+    EgressPipeImpl(),
+    ComputeChecksumImpl(),
+    DeparserImpl()
+) main;
diff --git a/src/tests/hackfest3/new-probe/agent.py b/src/tests/hackfest3/new-probe/agent.py
index 25a7aa45de5074609832d459fd9a81fb16a393df..3a89f0f1eb69168e188bdcc0881cf3fe97442d2c 100644
--- a/src/tests/hackfest3/new-probe/agent.py
+++ b/src/tests/hackfest3/new-probe/agent.py
@@ -44,14 +44,15 @@ monitoring_client = MonitoringClient(get_setting('MONITORINGSERVICE_SERVICE_HOST
 context_client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
 
 ### Locks and common variables
-enabled_lock = threading.Lock()
+# Lock for kpi_id
 kpi_id_lock = threading.Lock()
 kpi_id = KpiId()
+# Lock to know if we have registered a KPI or not
+enabled_lock = threading.Lock()
 enabled = False
 
 ### Define the path to the Unix socket
-socket_path = "/home/nuc8/tfs-develop/ngsdn-tutorial/tmp/sock"
-#socket_path = "./tmp/sock"
+socket_path = "/home/teraflow/ngsdn-tutorial/tmp/sock"
 if os.path.exists(socket_path):
     os.remove(socket_path)
 
@@ -59,34 +60,19 @@ def thread_context_func():
     global kpi_id
     global enabled
     while True:
-        # Listen to ContextService/GetServiceEvents stream 
-        events = context_client.GetServiceEvents(Empty())
-        for event in events:
-            event_service = event.service_id
-            event_service_uuid = event_service.service_uuid.uuid
-            event_type = event.event.event_type
-            if event_type == 1:
-                print(f"stream: New CREATE event:\n{event_service}")
-                kpi_descriptor = KpiDescriptor(
-                        kpi_id = None,
-                        kpi_id_list = [],
-                        device_id = None,
-                        endpoint_id = None,
-                        kpi_description = f"Loss Ratio for service {event_service_uuid}",
-                        service_id = event_service,
-                        kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN
-                        )
-                response = monitoring_client.SetKpi(kpi_descriptor)
-                print(response)
-                with kpi_id_lock:
-                    kpi_id = response
-                    print(kpi_id)
-                with enabled_lock:
-                    enabled = True
-            elif event_type == 3:
-                print(f"stream: New REMOVE event:\n{event_service}")
-                with enabled_lock:
-                    enabled = False
+##########################################################
+################## YOUR INPUT HERE #######################
+##########################################################
+        # Listen for Context Service Events
+        # Differentiate based on event type
+        # if event_type == service created:
+            # Create KpiDescriptor
+            # Register Kpi and keep kpi_id
+        # if event_type == service removed:
+            # stop sending values
+##########################################################
+##################### UNTIL HERE #########################
+##########################################################
 
 def thread_kpi_func():
     global kpi_id
@@ -94,50 +80,34 @@ def thread_kpi_func():
     try:
         # Create socket object
         server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
         # Bind the socket to the socket path
         server_socket.bind(socket_path)
-
         # Listen for incoming connections
         server_socket.listen(1)
-        
         while True:
             print("Awaiting for new connection!")
-
             # Accept incoming connection
             connection, client_address = server_socket.accept()
-
             # Read data from the connection
             data = connection.recv(1024)
-
             if data:
                 with enabled_lock:
                     if enabled: 
+##########################################################
+################## YOUR INPUT HERE #######################
+##########################################################
+                        # if we have registered a KPI
+                        #store value to data
                         data = data.decode()
                         print(f"Received: {data}")
                         with kpi_id_lock:
-                            
-                            now = time.time()
-
-                            new_timestamp = Timestamp()
-                            new_timestamp.timestamp = now
-
-                            new_value = KpiValue()
-                            new_value.floatVal = float(data)
-
-                            kpi = Kpi (
-                                    kpi_id = kpi_id,
-                                    timestamp = new_timestamp,
-                                    kpi_value = new_value
-                                    )
-                            print(kpi)
-                            response = monitoring_client.IncludeKpi(kpi) 
-                            print(f"response: {response}")
-
+                            # create Kpi
+                            # send Kpi to Monitoring
+##########################################################
+##################### UNTIL HERE #########################
+##########################################################
             # Close the connection 
             connection.close()
-
-    
     except Exception as e:
         print(f"Error: {str(e)}")
 
diff --git a/src/tests/hackfest3/new-probe/old/read.py b/src/tests/hackfest3/new-probe/old/read.py
deleted file mode 100644
index c91ddcea84072fbdeb194c2aca8440121022da71..0000000000000000000000000000000000000000
--- a/src/tests/hackfest3/new-probe/old/read.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import socket
-
-# Define the path to the Unix socket
-socket_path = "./tmp/sock"
-
-try:
-    # Create a socket object
-    server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
-    # Bind the socket to the socket path
-    server_socket.bind(socket_path)
-
-    # Listen for incoming connections
-    server_socket.listen(1)
-
-    print(f"Listening on {socket_path}...")
-
-    while True:
-        # Accept incoming connection
-        connection, client_address = server_socket.accept()
-
-        # Read data from the connection
-        data = connection.recv(1024)
-
-        if data:
-            print(f"Received: {data.decode()}")
-
-        # Close the connection
-        connection.close()
-
-except Exception as e:
-    print(f"Error: {str(e)}")
diff --git a/src/tests/hackfest3/new-probe/old/write.py b/src/tests/hackfest3/new-probe/old/write.py
deleted file mode 100644
index 5ca54873d1a9269284b6fe61034f37cd3efd2225..0000000000000000000000000000000000000000
--- a/src/tests/hackfest3/new-probe/old/write.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import socket
-
-# Define the path to the Unix socket
-socket_path = "./tmp/sock"
-
-# Data to be sent
-data = "hello"
-
-try:
-    # Create a socket object
-    client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
-    # Connect to the Unix socket
-    client_socket.connect(socket_path)
-
-    # Send the data
-    client_socket.send(data.encode())
-
-    # Close the socket
-    client_socket.close()
-
-    print("Sent to the socket.")
-except Exception as e:
-    print("Error")
-
-
diff --git a/src/tests/hackfest3/new-probe/ping2.py b/src/tests/hackfest3/new-probe/ping2.py
index d7c79717ff38cb99a8fdedad0af5334f7bc0e058..8b308d1200aec7d443868f954e4e03fda98bf792 100644
--- a/src/tests/hackfest3/new-probe/ping2.py
+++ b/src/tests/hackfest3/new-probe/ping2.py
@@ -1,52 +1,39 @@
 import socket, re, time, subprocess, sys
 
+# Path of the socket inside mininet container
 socket_path = "/tmp/sock"
-#socket_path = "./tmp/sock"
 
 def main():
     hostname = sys.argv[1]
-    count = 1
-    wait = 5
 
-    total_pings = 0
-    successful_pings = 0
     try:
         while True:
             start_time = time.time()
 
             try:
-                # Run the ping command and capture the output
-                result = subprocess.check_output(["ping", "-W", str(wait), "-c", str(count), hostname], universal_newlines=True)
-
-                response_time = float(re.findall(r"time=([0-9.]+) ms", result)[0])
-
+                # Run the ping command once and capture the output
+                response_time = 0
             except subprocess.CalledProcessError as e:
-                # If ping fails return negative response_time
+                # If ping fails (e.g., it cannot reach the destination),
+                # this branch is executed.
                 response_time = -1
 
-            # Calculate new loss_ratio
-            if response_time != -1:
-                successful_pings += 1
-            total_pings += 1
-            moving_loss_ratio = round(((total_pings - successful_pings) / float(total_pings) * 100), 2)
-
-            print("Total pings: {}".format(total_pings))
-            print("Successful pings: {}".format(successful_pings))
-
-            print("Packet loss: {}%".format(moving_loss_ratio))
             print("Latency: {} ms".format(response_time))
 
-            data = str(moving_loss_ratio)
-
+            # Uncomment the following when ready to write to socket
+            #data = str(response_time)
+            #
             # Write results in socket
-            try:
-                client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-                client_socket.connect(socket_path)
-                client_socket.send(data.encode())
-                client_socket.close()
-            except Exception as e:
-                print(e)
-
+            #try:
+            #    client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            #    client_socket.connect(socket_path)
+            #    client_socket.send(data.encode())
+            #    client_socket.close()
+            #except Exception as e:
+            #    print(e)
+
+            # The following makes sure that we ping once every
+            # 6 seconds, regardless of how long the ping took.
             # Calculate the time taken by ping
             execution_time = time.time() - start_time
             # Wait the rest of the time
diff --git a/src/tests/hackfest3/new-probe/solution/agent.py b/src/tests/hackfest3/new-probe/solution/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..25a7aa45de5074609832d459fd9a81fb16a393df
--- /dev/null
+++ b/src/tests/hackfest3/new-probe/solution/agent.py
@@ -0,0 +1,165 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#import copy, logging, pytest
+#from common.tests.EventTools import EVENT_CREATE, EVENT_UPDATE, check_events
+#from common.tools.object_factory.Context import json_context_id
+#from common.tools.object_factory.Device import json_device_id
+#from common.tools.object_factory.Service import json_service_id
+#from common.tools.object_factory.Link import json_link_id
+#from common.tools.object_factory.Topology import json_topology_id
+#from context.client.EventsCollector import EventsCollector
+#from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology, Service, ServiceId
+#from monitoring.client.MonitoringClient import MonitoringClient
+#from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId, DeviceOperationalStatusEnum
+
+import os, threading, time, socket
+from common.Settings import get_setting
+from common.proto.context_pb2 import Empty, Timestamp
+from common.proto.monitoring_pb2 import KpiDescriptor, Kpi, KpiId, KpiValue
+from common.proto.kpi_sample_types_pb2 import KpiSampleType
+from monitoring.client.MonitoringClient import MonitoringClient
+from context.client.ContextClient import ContextClient
+
+# ----- If you want to use .env file
+#from dotenv import load_dotenv
+#load_dotenv()
+#def get_setting(key):
+#    return os.getenv(key)
+
+
+#### gRPC Clients
+monitoring_client = MonitoringClient(get_setting('MONITORINGSERVICE_SERVICE_HOST'), get_setting('MONITORINGSERVICE_SERVICE_PORT_GRPC'))
+context_client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC'))
+
+### Locks and common variables
+enabled_lock = threading.Lock()
+kpi_id_lock = threading.Lock()
+kpi_id = KpiId()
+enabled = False
+
+### Define the path to the Unix socket
+socket_path = "/home/nuc8/tfs-develop/ngsdn-tutorial/tmp/sock"
+#socket_path = "./tmp/sock"
+if os.path.exists(socket_path):
+    os.remove(socket_path)
+
+def thread_context_func():
+    global kpi_id
+    global enabled
+    while True:
+        # Listen to ContextService/GetServiceEvents stream 
+        events = context_client.GetServiceEvents(Empty())
+        for event in events:
+            event_service = event.service_id
+            event_service_uuid = event_service.service_uuid.uuid
+            event_type = event.event.event_type
+            if event_type == 1:
+                print(f"stream: New CREATE event:\n{event_service}")
+                kpi_descriptor = KpiDescriptor(
+                        kpi_id = None,
+                        kpi_id_list = [],
+                        device_id = None,
+                        endpoint_id = None,
+                        kpi_description = f"Loss Ratio for service {event_service_uuid}",
+                        service_id = event_service,
+                        kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN
+                        )
+                response = monitoring_client.SetKpi(kpi_descriptor)
+                print(response)
+                with kpi_id_lock:
+                    kpi_id = response
+                    print(kpi_id)
+                with enabled_lock:
+                    enabled = True
+            elif event_type == 3:
+                print(f"stream: New REMOVE event:\n{event_service}")
+                with enabled_lock:
+                    enabled = False
+
+def thread_kpi_func():
+    global kpi_id
+    global enabled
+    try:
+        # Create socket object
+        server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+        # Bind the socket to the socket path
+        server_socket.bind(socket_path)
+
+        # Listen for incoming connections
+        server_socket.listen(1)
+        
+        while True:
+            print("Awaiting for new connection!")
+
+            # Accept incoming connection
+            connection, client_address = server_socket.accept()
+
+            # Read data from the connection
+            data = connection.recv(1024)
+
+            if data:
+                with enabled_lock:
+                    if enabled: 
+                        data = data.decode()
+                        print(f"Received: {data}")
+                        with kpi_id_lock:
+                            
+                            now = time.time()
+
+                            new_timestamp = Timestamp()
+                            new_timestamp.timestamp = now
+
+                            new_value = KpiValue()
+                            new_value.floatVal = float(data)
+
+                            kpi = Kpi (
+                                    kpi_id = kpi_id,
+                                    timestamp = new_timestamp,
+                                    kpi_value = new_value
+                                    )
+                            print(kpi)
+                            response = monitoring_client.IncludeKpi(kpi) 
+                            print(f"response: {response}")
+
+            # Close the connection 
+            connection.close()
+
+    
+    except Exception as e:
+        print(f"Error: {str(e)}")
+
+
+def main():
+
+    # Start Thread that listens to context events
+    thread_context = threading.Thread(target=thread_context_func)
+    thread_context.daemon = True
+    thread_context.start()
+
+    # Start Thread that listens to socket
+    thread_kpi = threading.Thread(target=thread_kpi_func)
+    thread_kpi.daemon = True
+    thread_kpi.start()
+
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        os.remove(socket_path)
+        print("Script terminated.")
+
+if __name__ == "__main__":
+    main()
diff --git a/src/tests/hackfest3/new-probe/solution/ping2.py b/src/tests/hackfest3/new-probe/solution/ping2.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7c79717ff38cb99a8fdedad0af5334f7bc0e058
--- /dev/null
+++ b/src/tests/hackfest3/new-probe/solution/ping2.py
@@ -0,0 +1,61 @@
+import socket, re, time, subprocess, sys
+
+socket_path = "/tmp/sock"
+#socket_path = "./tmp/sock"
+
+def main():
+    hostname = sys.argv[1]
+    count = 1
+    wait = 5
+
+    total_pings = 0
+    successful_pings = 0
+    try:
+        while True:
+            start_time = time.time()
+
+            try:
+                # Run the ping command and capture the output
+                result = subprocess.check_output(["ping", "-W", str(wait), "-c", str(count), hostname], universal_newlines=True)
+
+                response_time = float(re.findall(r"time=([0-9.]+) ms", result)[0])
+
+            except subprocess.CalledProcessError as e:
+                # If ping fails return negative response_time
+                response_time = -1
+
+            # Calculate new loss_ratio
+            if response_time != -1:
+                successful_pings += 1
+            total_pings += 1
+            moving_loss_ratio = round(((total_pings - successful_pings) / float(total_pings) * 100), 2)
+
+            print("Total pings: {}".format(total_pings))
+            print("Successful pings: {}".format(successful_pings))
+
+            print("Packet loss: {}%".format(moving_loss_ratio))
+            print("Latency: {} ms".format(response_time))
+
+            data = str(moving_loss_ratio)
+
+            # Write results in socket
+            try:
+                client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+                client_socket.connect(socket_path)
+                client_socket.send(data.encode())
+                client_socket.close()
+            except Exception as e:
+                print(e)
+
+            # Calculate the time taken by ping
+            execution_time = time.time() - start_time
+            # Wait the rest of the time
+            wait_time = max(0, 6 - execution_time)
+            time.sleep(wait_time)
+
+    except KeyboardInterrupt:
+        print("Script terminated.")
+
+if __name__ == "__main__":
+    main()
+
diff --git a/src/tests/hackfest3/p4/backup/bmv2.json b/src/tests/hackfest3/p4/backup/bmv2.json
deleted file mode 100644
index f001eb52e90e875c4152f4d7820664402ac856c3..0000000000000000000000000000000000000000
--- a/src/tests/hackfest3/p4/backup/bmv2.json
+++ /dev/null
@@ -1,381 +0,0 @@
-{
-  "header_types" : [
-    {
-      "name" : "scalars_0",
-      "id" : 0,
-      "fields" : [
-        ["local_metadata_t.is_multicast", 1, false],
-        ["_padding_0", 7, false]
-      ]
-    },
-    {
-      "name" : "standard_metadata",
-      "id" : 1,
-      "fields" : [
-        ["ingress_port", 9, false],
-        ["egress_spec", 9, false],
-        ["egress_port", 9, false],
-        ["clone_spec", 32, false],
-        ["instance_type", 32, false],
-        ["drop", 1, false],
-        ["recirculate_port", 16, false],
-        ["packet_length", 32, false],
-        ["enq_timestamp", 32, false],
-        ["enq_qdepth", 19, false],
-        ["deq_timedelta", 32, false],
-        ["deq_qdepth", 19, false],
-        ["ingress_global_timestamp", 48, false],
-        ["egress_global_timestamp", 48, false],
-        ["lf_field_list", 32, false],
-        ["mcast_grp", 16, false],
-        ["resubmit_flag", 32, false],
-        ["egress_rid", 16, false],
-        ["recirculate_flag", 32, false],
-        ["checksum_error", 1, false],
-        ["parser_error", 32, false],
-        ["priority", 3, false],
-        ["_padding", 2, false]
-      ]
-    },
-    {
-      "name" : "ethernet_t",
-      "id" : 2,
-      "fields" : [
-        ["dst_addr", 48, false],
-        ["src_addr", 48, false],
-        ["ether_type", 16, false]
-      ]
-    }
-  ],
-  "headers" : [
-    {
-      "name" : "scalars",
-      "id" : 0,
-      "header_type" : "scalars_0",
-      "metadata" : true,
-      "pi_omit" : true
-    },
-    {
-      "name" : "standard_metadata",
-      "id" : 1,
-      "header_type" : "standard_metadata",
-      "metadata" : true,
-      "pi_omit" : true
-    },
-    {
-      "name" : "ethernet",
-      "id" : 2,
-      "header_type" : "ethernet_t",
-      "metadata" : false,
-      "pi_omit" : true
-    }
-  ],
-  "header_stacks" : [],
-  "header_union_types" : [],
-  "header_unions" : [],
-  "header_union_stacks" : [],
-  "field_lists" : [],
-  "errors" : [
-    ["NoError", 1],
-    ["PacketTooShort", 2],
-    ["NoMatch", 3],
-    ["StackOutOfBounds", 4],
-    ["HeaderTooShort", 5],
-    ["ParserTimeout", 6],
-    ["ParserInvalidArgument", 7]
-  ],
-  "enums" : [],
-  "parsers" : [
-    {
-      "name" : "parser",
-      "id" : 0,
-      "init_state" : "start",
-      "parse_states" : [
-        {
-          "name" : "start",
-          "id" : 0,
-          "parser_ops" : [
-            {
-              "parameters" : [
-                {
-                  "type" : "regular",
-                  "value" : "ethernet"
-                }
-              ],
-              "op" : "extract"
-            }
-          ],
-          "transitions" : [
-            {
-              "value" : "default",
-              "mask" : null,
-              "next_state" : null
-            }
-          ],
-          "transition_key" : []
-        }
-      ]
-    }
-  ],
-  "parse_vsets" : [],
-  "deparsers" : [
-    {
-      "name" : "deparser",
-      "id" : 0,
-      "source_info" : {
-        "filename" : "p4src/main.p4",
-        "line" : 130,
-        "column" : 8,
-        "source_fragment" : "DeparserImpl"
-      },
-      "order" : ["ethernet"]
-    }
-  ],
-  "meter_arrays" : [],
-  "counter_arrays" : [],
-  "register_arrays" : [],
-  "calculations" : [],
-  "learn_lists" : [],
-  "actions" : [
-    {
-      "name" : "IngressPipeImpl.drop",
-      "id" : 0,
-      "runtime_data" : [],
-      "primitives" : [
-        {
-          "op" : "mark_to_drop",
-          "parameters" : [
-            {
-              "type" : "header",
-              "value" : "standard_metadata"
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 77,
-            "column" : 8,
-            "source_fragment" : "mark_to_drop(standard_metadata)"
-          }
-        }
-      ]
-    },
-    {
-      "name" : "IngressPipeImpl.set_egress_port",
-      "id" : 1,
-      "runtime_data" : [
-        {
-          "name" : "port",
-          "bitwidth" : 9
-        }
-      ],
-      "primitives" : [
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["standard_metadata", "egress_spec"]
-            },
-            {
-              "type" : "runtime_data",
-              "value" : 0
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 81,
-            "column" : 8,
-            "source_fragment" : "standard_metadata.egress_spec = port"
-          }
-        }
-      ]
-    },
-    {
-      "name" : "IngressPipeImpl.set_multicast_group",
-      "id" : 2,
-      "runtime_data" : [
-        {
-          "name" : "gid",
-          "bitwidth" : 16
-        }
-      ],
-      "primitives" : [
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["standard_metadata", "mcast_grp"]
-            },
-            {
-              "type" : "runtime_data",
-              "value" : 0
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 89,
-            "column" : 8,
-            "source_fragment" : "standard_metadata.mcast_grp = gid"
-          }
-        },
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["scalars", "local_metadata_t.is_multicast"]
-            },
-            {
-              "type" : "expression",
-              "value" : {
-                "type" : "expression",
-                "value" : {
-                  "op" : "b2d",
-                  "left" : null,
-                  "right" : {
-                    "type" : "bool",
-                    "value" : true
-                  }
-                }
-              }
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 90,
-            "column" : 8,
-            "source_fragment" : "local_metadata.is_multicast = true"
-          }
-        }
-      ]
-    }
-  ],
-  "pipelines" : [
-    {
-      "name" : "ingress",
-      "id" : 0,
-      "source_info" : {
-        "filename" : "p4src/main.p4",
-        "line" : 71,
-        "column" : 8,
-        "source_fragment" : "IngressPipeImpl"
-      },
-      "init_table" : "IngressPipeImpl.l2_exact_table",
-      "tables" : [
-        {
-          "name" : "IngressPipeImpl.l2_exact_table",
-          "id" : 0,
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 95,
-            "column" : 10,
-            "source_fragment" : "l2_exact_table"
-          },
-          "key" : [
-            {
-              "match_type" : "exact",
-              "name" : "standard_metadata.ingress_port",
-              "target" : ["standard_metadata", "ingress_port"],
-              "mask" : null
-            }
-          ],
-          "match_type" : "exact",
-          "type" : "simple",
-          "max_size" : 1024,
-          "with_counters" : false,
-          "support_timeout" : false,
-          "direct_meters" : null,
-          "action_ids" : [1, 2, 0],
-          "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"],
-          "base_default_next" : null,
-          "next_tables" : {
-            "IngressPipeImpl.set_egress_port" : null,
-            "IngressPipeImpl.set_multicast_group" : null,
-            "IngressPipeImpl.drop" : null
-          },
-          "default_entry" : {
-            "action_id" : 0,
-            "action_const" : true,
-            "action_data" : [],
-            "action_entry_const" : true
-          }
-        }
-      ],
-      "action_profiles" : [],
-      "conditionals" : []
-    },
-    {
-      "name" : "egress",
-      "id" : 1,
-      "source_info" : {
-        "filename" : "p4src/main.p4",
-        "line" : 116,
-        "column" : 8,
-        "source_fragment" : "EgressPipeImpl"
-      },
-      "init_table" : null,
-      "tables" : [],
-      "action_profiles" : [],
-      "conditionals" : []
-    }
-  ],
-  "checksums" : [],
-  "force_arith" : [],
-  "extern_instances" : [],
-  "field_aliases" : [
-    [
-      "queueing_metadata.enq_timestamp",
-      ["standard_metadata", "enq_timestamp"]
-    ],
-    [
-      "queueing_metadata.enq_qdepth",
-      ["standard_metadata", "enq_qdepth"]
-    ],
-    [
-      "queueing_metadata.deq_timedelta",
-      ["standard_metadata", "deq_timedelta"]
-    ],
-    [
-      "queueing_metadata.deq_qdepth",
-      ["standard_metadata", "deq_qdepth"]
-    ],
-    [
-      "intrinsic_metadata.ingress_global_timestamp",
-      ["standard_metadata", "ingress_global_timestamp"]
-    ],
-    [
-      "intrinsic_metadata.egress_global_timestamp",
-      ["standard_metadata", "egress_global_timestamp"]
-    ],
-    [
-      "intrinsic_metadata.lf_field_list",
-      ["standard_metadata", "lf_field_list"]
-    ],
-    [
-      "intrinsic_metadata.mcast_grp",
-      ["standard_metadata", "mcast_grp"]
-    ],
-    [
-      "intrinsic_metadata.resubmit_flag",
-      ["standard_metadata", "resubmit_flag"]
-    ],
-    [
-      "intrinsic_metadata.egress_rid",
-      ["standard_metadata", "egress_rid"]
-    ],
-    [
-      "intrinsic_metadata.recirculate_flag",
-      ["standard_metadata", "recirculate_flag"]
-    ],
-    [
-      "intrinsic_metadata.priority",
-      ["standard_metadata", "priority"]
-    ]
-  ],
-  "program" : "p4src/main.p4",
-  "__meta__" : {
-    "version" : [2, 18],
-    "compiler" : "https://github.com/p4lang/p4c"
-  }
-}
\ No newline at end of file
diff --git a/src/tests/hackfest3/p4/backup/main.p4 b/src/tests/hackfest3/p4/backup/main.p4
deleted file mode 100644
index 843eb0d580e362e74b25c768b1b01e750138637a..0000000000000000000000000000000000000000
--- a/src/tests/hackfest3/p4/backup/main.p4
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include <core.p4>
-#include <v1model.p4>
-
-typedef bit<9>   port_num_t;
-typedef bit<48>  mac_addr_t;
-typedef bit<16>  mcast_group_id_t;
-
-//------------------------------------------------------------------------------
-// HEADER DEFINITIONS
-//------------------------------------------------------------------------------
-
-header ethernet_t {
-    mac_addr_t  dst_addr;
-    mac_addr_t  src_addr;
-    bit<16>     ether_type;
-}
-
-struct parsed_headers_t {
-    ethernet_t  ethernet;
-}
-
-struct local_metadata_t {
-    bool        is_multicast;
-}
-
-
-//------------------------------------------------------------------------------
-// INGRESS PIPELINE
-//------------------------------------------------------------------------------
-
-parser ParserImpl (packet_in packet,
-                   out parsed_headers_t hdr,
-                   inout local_metadata_t local_metadata,
-                   inout standard_metadata_t standard_metadata)
-{
-    state start {
-      transition parse_ethernet;
-    }
-
-    state parse_ethernet {
-        packet.extract(hdr.ethernet);
-        transition accept;
-    }
-}
-
-
-control VerifyChecksumImpl(inout parsed_headers_t hdr,
-                           inout local_metadata_t meta)
-{
-    apply { /* EMPTY */ }
-}
-
-
-control IngressPipeImpl (inout parsed_headers_t    hdr,
-                         inout local_metadata_t    local_metadata,
-                         inout standard_metadata_t standard_metadata) {
-
-    // Drop action shared by many tables.
-    action drop() {
-        mark_to_drop(standard_metadata);
-    }
-
-    action set_egress_port(port_num_t port) {
-        standard_metadata.egress_spec = port;
-    }
-
-    action set_multicast_group(mcast_group_id_t gid) {
-        // gid will be used by the Packet Replication Engine (PRE) in the
-        // Traffic Manager--located right after the ingress pipeline, to
-        // replicate a packet to multiple egress ports, specified by the control
-        // plane by means of P4Runtime MulticastGroupEntry messages.
-        standard_metadata.mcast_grp = gid;
-        local_metadata.is_multicast = true;
-    }
-
-    // --- l2_exact_table ------------------
-
-    table l2_exact_table {
-        key = {
-            standard_metadata.ingress_port: exact;
-        }
-        actions = {
-            set_egress_port;
-            set_multicast_group;
-            @defaultonly drop;
-        }
-        const default_action = drop;
-    }
-
-    apply {
-        l2_exact_table.apply();
-    }
-}
-
-//------------------------------------------------------------------------------
-// EGRESS PIPELINE
-//------------------------------------------------------------------------------
-
-control EgressPipeImpl (inout parsed_headers_t hdr,
-                        inout local_metadata_t local_metadata,
-                        inout standard_metadata_t standard_metadata) {
-    apply { /* EMPTY */ }
-}
-
-
-control ComputeChecksumImpl(inout parsed_headers_t hdr,
-                            inout local_metadata_t local_metadata)
-{
-    apply { /* EMPTY */ }
-}
-
-
-control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
-    apply {
-        packet.emit(hdr.ethernet);
-    }
-}
-
-
-V1Switch(
-    ParserImpl(),
-    VerifyChecksumImpl(),
-    IngressPipeImpl(),
-    EgressPipeImpl(),
-    ComputeChecksumImpl(),
-    DeparserImpl()
-) main;
diff --git a/src/tests/hackfest3/p4/backup/p4info.txt b/src/tests/hackfest3/p4/backup/p4info.txt
deleted file mode 100644
index 0b58e740864b72e6ca87582431cd7bd57894d0dd..0000000000000000000000000000000000000000
--- a/src/tests/hackfest3/p4/backup/p4info.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-pkg_info {
-  arch: "v1model"
-}
-tables {
-  preamble {
-    id: 33605373
-    name: "IngressPipeImpl.l2_exact_table"
-    alias: "l2_exact_table"
-  }
-  match_fields {
-    id: 1
-    name: "standard_metadata.ingress_port"
-    bitwidth: 9
-    match_type: EXACT
-  }
-  action_refs {
-    id: 16812802
-  }
-  action_refs {
-    id: 16841371
-  }
-  action_refs {
-    id: 16796182
-    annotations: "@defaultonly"
-    scope: DEFAULT_ONLY
-  }
-  const_default_action_id: 16796182
-  size: 1024
-}
-actions {
-  preamble {
-    id: 16796182
-    name: "IngressPipeImpl.drop"
-    alias: "drop"
-  }
-}
-actions {
-  preamble {
-    id: 16812802
-    name: "IngressPipeImpl.set_egress_port"
-    alias: "set_egress_port"
-  }
-  params {
-    id: 1
-    name: "port"
-    bitwidth: 9
-  }
-}
-actions {
-  preamble {
-    id: 16841371
-    name: "IngressPipeImpl.set_multicast_group"
-    alias: "set_multicast_group"
-  }
-  params {
-    id: 1
-    name: "gid"
-    bitwidth: 16
-  }
-}
-type_info {
-}
diff --git a/src/tests/hackfest3/p4/bmv2.json b/src/tests/hackfest3/p4/bmv2.json
index d84c14a890c9d0906f1c5befabbfae3c97fdb1b3..f001eb52e90e875c4152f4d7820664402ac856c3 100644
--- a/src/tests/hackfest3/p4/bmv2.json
+++ b/src/tests/hackfest3/p4/bmv2.json
@@ -4,8 +4,7 @@
       "name" : "scalars_0",
       "id" : 0,
       "fields" : [
-        ["tmp", 1, false],
-        ["local_metadata_t._parser_metadata_num_headers_remaining0", 16, false],
+        ["local_metadata_t.is_multicast", 1, false],
         ["_padding_0", 7, false]
       ]
     },
@@ -42,54 +41,9 @@
       "name" : "ethernet_t",
       "id" : 2,
       "fields" : [
-        ["dstAddr", 48, false],
-        ["srcAddr", 48, false],
-        ["etherType", 16, false]
-      ]
-    },
-    {
-      "name" : "ipv4_t",
-      "id" : 3,
-      "fields" : [
-        ["version", 4, false],
-        ["ihl", 4, false],
-        ["dscp", 6, false],
-        ["ecn", 2, false],
-        ["totalLen", 16, false],
-        ["identification", 16, false],
-        ["flags", 3, false],
-        ["fragOffset", 13, false],
-        ["ttl", 8, false],
-        ["protocol", 8, false],
-        ["hdrChecksum", 16, false],
-        ["srcAddr", 32, false],
-        ["dstAddr", 32, false]
-      ]
-    },
-    {
-      "name" : "ipv4_option_t",
-      "id" : 4,
-      "fields" : [
-        ["copyFlag", 1, false],
-        ["optClass", 2, false],
-        ["option", 5, false],
-        ["optionLength", 8, false]
-      ]
-    },
-    {
-      "name" : "int_count_t",
-      "id" : 5,
-      "fields" : [
-        ["num_switches", 16, false]
-      ]
-    },
-    {
-      "name" : "int_header_t",
-      "id" : 6,
-      "fields" : [
-        ["switch_id", 13, false],
-        ["queue_depth", 13, false],
-        ["output_port", 6, false]
+        ["dst_addr", 48, false],
+        ["src_addr", 48, false],
+        ["ether_type", 16, false]
       ]
     }
   ],
@@ -114,101 +68,9 @@
       "header_type" : "ethernet_t",
       "metadata" : false,
       "pi_omit" : true
-    },
-    {
-      "name" : "ipv4",
-      "id" : 3,
-      "header_type" : "ipv4_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "ipv4_option",
-      "id" : 4,
-      "header_type" : "ipv4_option_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_count",
-      "id" : 5,
-      "header_type" : "int_count_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[0]",
-      "id" : 6,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[1]",
-      "id" : 7,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[2]",
-      "id" : 8,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[3]",
-      "id" : 9,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[4]",
-      "id" : 10,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[5]",
-      "id" : 11,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[6]",
-      "id" : 12,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[7]",
-      "id" : 13,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    },
-    {
-      "name" : "int_headers[8]",
-      "id" : 14,
-      "header_type" : "int_header_t",
-      "metadata" : false,
-      "pi_omit" : true
-    }
-  ],
-  "header_stacks" : [
-    {
-      "name" : "int_headers",
-      "id" : 0,
-      "header_type" : "int_header_t",
-      "size" : 9,
-      "header_ids" : [6, 7, 8, 9, 10, 11, 12, 13, 14]
     }
   ],
+  "header_stacks" : [],
   "header_union_types" : [],
   "header_unions" : [],
   "header_union_stacks" : [],
@@ -220,8 +82,7 @@
     ["StackOutOfBounds", 4],
     ["HeaderTooShort", 5],
     ["ParserTimeout", 6],
-    ["ParserInvalidArgument", 7],
-    ["IPHeaderWithoutOptions", 8]
+    ["ParserInvalidArgument", 7]
   ],
   "enums" : [],
   "parsers" : [
@@ -245,262 +106,13 @@
             }
           ],
           "transitions" : [
-            {
-              "type" : "hexstr",
-              "value" : "0x0800",
-              "mask" : null,
-              "next_state" : "parse_ipv4"
-            },
             {
               "value" : "default",
               "mask" : null,
               "next_state" : null
             }
           ],
-          "transition_key" : [
-            {
-              "type" : "field",
-              "value" : ["ethernet", "etherType"]
-            }
-          ]
-        },
-        {
-          "name" : "parse_ipv4",
-          "id" : 1,
-          "parser_ops" : [
-            {
-              "parameters" : [
-                {
-                  "type" : "regular",
-                  "value" : "ipv4"
-                }
-              ],
-              "op" : "extract"
-            },
-            {
-              "parameters" : [
-                {
-                  "type" : "field",
-                  "value" : ["scalars", "tmp"]
-                },
-                {
-                  "type" : "expression",
-                  "value" : {
-                    "type" : "expression",
-                    "value" : {
-                      "op" : "b2d",
-                      "left" : null,
-                      "right" : {
-                        "type" : "expression",
-                        "value" : {
-                          "op" : ">=",
-                          "left" : {
-                            "type" : "field",
-                            "value" : ["ipv4", "ihl"]
-                          },
-                          "right" : {
-                            "type" : "hexstr",
-                            "value" : "0x05"
-                          }
-                        }
-                      }
-                    }
-                  }
-                }
-              ],
-              "op" : "set"
-            },
-            {
-              "parameters" : [
-                {
-                  "type" : "expression",
-                  "value" : {
-                    "op" : "d2b",
-                    "left" : null,
-                    "right" : {
-                      "type" : "field",
-                      "value" : ["scalars", "tmp"]
-                    }
-                  }
-                },
-                {
-                  "type" : "hexstr",
-                  "value" : "0x8"
-                }
-              ],
-              "op" : "verify"
-            }
-          ],
-          "transitions" : [
-            {
-              "type" : "hexstr",
-              "value" : "0x05",
-              "mask" : null,
-              "next_state" : null
-            },
-            {
-              "value" : "default",
-              "mask" : null,
-              "next_state" : "parse_ipv4_option"
-            }
-          ],
-          "transition_key" : [
-            {
-              "type" : "field",
-              "value" : ["ipv4", "ihl"]
-            }
-          ]
-        },
-        {
-          "name" : "parse_ipv4_option",
-          "id" : 2,
-          "parser_ops" : [
-            {
-              "parameters" : [
-                {
-                  "type" : "regular",
-                  "value" : "ipv4_option"
-                }
-              ],
-              "op" : "extract"
-            }
-          ],
-          "transitions" : [
-            {
-              "type" : "hexstr",
-              "value" : "0x1f",
-              "mask" : null,
-              "next_state" : "parse_int"
-            },
-            {
-              "value" : "default",
-              "mask" : null,
-              "next_state" : null
-            }
-          ],
-          "transition_key" : [
-            {
-              "type" : "field",
-              "value" : ["ipv4_option", "option"]
-            }
-          ]
-        },
-        {
-          "name" : "parse_int",
-          "id" : 3,
-          "parser_ops" : [
-            {
-              "parameters" : [
-                {
-                  "type" : "regular",
-                  "value" : "int_count"
-                }
-              ],
-              "op" : "extract"
-            },
-            {
-              "parameters" : [
-                {
-                  "type" : "field",
-                  "value" : ["scalars", "local_metadata_t._parser_metadata_num_headers_remaining0"]
-                },
-                {
-                  "type" : "field",
-                  "value" : ["int_count", "num_switches"]
-                }
-              ],
-              "op" : "set"
-            }
-          ],
-          "transitions" : [
-            {
-              "type" : "hexstr",
-              "value" : "0x0000",
-              "mask" : null,
-              "next_state" : null
-            },
-            {
-              "value" : "default",
-              "mask" : null,
-              "next_state" : "parse_int_headers"
-            }
-          ],
-          "transition_key" : [
-            {
-              "type" : "field",
-              "value" : ["int_count", "num_switches"]
-            }
-          ]
-        },
-        {
-          "name" : "parse_int_headers",
-          "id" : 4,
-          "parser_ops" : [
-            {
-              "parameters" : [
-                {
-                  "type" : "stack",
-                  "value" : "int_headers"
-                }
-              ],
-              "op" : "extract"
-            },
-            {
-              "parameters" : [
-                {
-                  "type" : "field",
-                  "value" : ["scalars", "local_metadata_t._parser_metadata_num_headers_remaining0"]
-                },
-                {
-                  "type" : "expression",
-                  "value" : {
-                    "type" : "expression",
-                    "value" : {
-                      "op" : "&",
-                      "left" : {
-                        "type" : "expression",
-                        "value" : {
-                          "op" : "+",
-                          "left" : {
-                            "type" : "field",
-                            "value" : ["scalars", "local_metadata_t._parser_metadata_num_headers_remaining0"]
-                          },
-                          "right" : {
-                            "type" : "hexstr",
-                            "value" : "0xffff"
-                          }
-                        }
-                      },
-                      "right" : {
-                        "type" : "hexstr",
-                        "value" : "0xffff"
-                      }
-                    }
-                  }
-                }
-              ],
-              "op" : "set"
-            }
-          ],
-          "transitions" : [
-            {
-              "type" : "hexstr",
-              "value" : "0x0000",
-              "mask" : null,
-              "next_state" : null
-            },
-            {
-              "value" : "default",
-              "mask" : null,
-              "next_state" : "parse_int_headers"
-            }
-          ],
-          "transition_key" : [
-            {
-              "type" : "field",
-              "value" : ["scalars", "local_metadata_t._parser_metadata_num_headers_remaining0"]
-            }
-          ]
+          "transition_key" : []
         }
       ]
     }
@@ -512,79 +124,17 @@
       "id" : 0,
       "source_info" : {
         "filename" : "p4src/main.p4",
-        "line" : 258,
+        "line" : 130,
         "column" : 8,
         "source_fragment" : "DeparserImpl"
       },
-      "order" : ["ethernet", "ipv4", "ipv4_option", "int_count", "int_headers[0]", "int_headers[1]", "int_headers[2]", "int_headers[3]", "int_headers[4]", "int_headers[5]", "int_headers[6]", "int_headers[7]", "int_headers[8]"]
+      "order" : ["ethernet"]
     }
   ],
   "meter_arrays" : [],
   "counter_arrays" : [],
   "register_arrays" : [],
-  "calculations" : [
-    {
-      "name" : "calc",
-      "id" : 0,
-      "source_info" : {
-        "filename" : "p4src/main.p4",
-        "line" : 239,
-        "column" : 8,
-        "source_fragment" : "update_checksum( ..."
-      },
-      "algo" : "csum16",
-      "input" : [
-        {
-          "type" : "field",
-          "value" : ["ipv4", "version"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "ihl"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "dscp"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "ecn"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "totalLen"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "identification"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "flags"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "fragOffset"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "ttl"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "protocol"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "srcAddr"]
-        },
-        {
-          "type" : "field",
-          "value" : ["ipv4", "dstAddr"]
-        }
-      ]
-    }
-  ],
+  "calculations" : [],
   "learn_lists" : [],
   "actions" : [
     {
@@ -602,7 +152,7 @@
           ],
           "source_info" : {
             "filename" : "p4src/main.p4",
-            "line" : 168,
+            "line" : 77,
             "column" : 8,
             "source_fragment" : "mark_to_drop(standard_metadata)"
           }
@@ -633,7 +183,7 @@
           ],
           "source_info" : {
             "filename" : "p4src/main.p4",
-            "line" : 172,
+            "line" : 81,
             "column" : 8,
             "source_fragment" : "standard_metadata.egress_spec = port"
           }
@@ -641,18 +191,12 @@
       ]
     },
     {
-      "name" : "NoAction",
+      "name" : "IngressPipeImpl.set_multicast_group",
       "id" : 2,
-      "runtime_data" : [],
-      "primitives" : []
-    },
-    {
-      "name" : "EgressPipeImpl.add_int_header",
-      "id" : 3,
       "runtime_data" : [
         {
-          "name" : "swid",
-          "bitwidth" : 13
+          "name" : "gid",
+          "bitwidth" : 16
         }
       ],
       "primitives" : [
@@ -661,83 +205,7 @@
           "parameters" : [
             {
               "type" : "field",
-              "value" : ["int_count", "num_switches"]
-            },
-            {
-              "type" : "expression",
-              "value" : {
-                "type" : "expression",
-                "value" : {
-                  "op" : "&",
-                  "left" : {
-                    "type" : "expression",
-                    "value" : {
-                      "op" : "+",
-                      "left" : {
-                        "type" : "field",
-                        "value" : ["int_count", "num_switches"]
-                      },
-                      "right" : {
-                        "type" : "hexstr",
-                        "value" : "0x0001"
-                      }
-                    }
-                  },
-                  "right" : {
-                    "type" : "hexstr",
-                    "value" : "0xffff"
-                  }
-                }
-              }
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 204,
-            "column" : 8,
-            "source_fragment" : "hdr.int_count.num_switches = hdr.int_count.num_switches + 1"
-          }
-        },
-        {
-          "op" : "push",
-          "parameters" : [
-            {
-              "type" : "header_stack",
-              "value" : "int_headers"
-            },
-            {
-              "type" : "hexstr",
-              "value" : "0x1"
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 205,
-            "column" : 8,
-            "source_fragment" : "hdr.int_headers.push_front(1)"
-          }
-        },
-        {
-          "op" : "add_header",
-          "parameters" : [
-            {
-              "type" : "header",
-              "value" : "int_headers[0]"
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 208,
-            "column" : 8,
-            "source_fragment" : "hdr.int_headers[0].setValid()"
-          }
-        },
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["int_headers[0]", "switch_id"]
+              "value" : ["standard_metadata", "mcast_grp"]
             },
             {
               "type" : "runtime_data",
@@ -746,9 +214,9 @@
           ],
           "source_info" : {
             "filename" : "p4src/main.p4",
-            "line" : 209,
+            "line" : 89,
             "column" : 8,
-            "source_fragment" : "hdr.int_headers[0].switch_id = (bit<13>)swid"
+            "source_fragment" : "standard_metadata.mcast_grp = gid"
           }
         },
         {
@@ -756,21 +224,18 @@
           "parameters" : [
             {
               "type" : "field",
-              "value" : ["int_headers[0]", "queue_depth"]
+              "value" : ["scalars", "local_metadata_t.is_multicast"]
             },
             {
               "type" : "expression",
               "value" : {
                 "type" : "expression",
                 "value" : {
-                  "op" : "&",
-                  "left" : {
-                    "type" : "field",
-                    "value" : ["standard_metadata", "deq_qdepth"]
-                  },
+                  "op" : "b2d",
+                  "left" : null,
                   "right" : {
-                    "type" : "hexstr",
-                    "value" : "0x1fff"
+                    "type" : "bool",
+                    "value" : true
                   }
                 }
               }
@@ -778,167 +243,9 @@
           ],
           "source_info" : {
             "filename" : "p4src/main.p4",
-            "line" : 210,
+            "line" : 90,
             "column" : 8,
-            "source_fragment" : "hdr.int_headers[0].queue_depth = (bit<13>)standard_metadata.deq_qdepth"
-          }
-        },
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["int_headers[0]", "output_port"]
-            },
-            {
-              "type" : "expression",
-              "value" : {
-                "type" : "expression",
-                "value" : {
-                  "op" : "&",
-                  "left" : {
-                    "type" : "field",
-                    "value" : ["standard_metadata", "egress_port"]
-                  },
-                  "right" : {
-                    "type" : "hexstr",
-                    "value" : "0x3f"
-                  }
-                }
-              }
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 211,
-            "column" : 8,
-            "source_fragment" : "hdr.int_headers[0].output_port = (bit<6>)standard_metadata.egress_port"
-          }
-        },
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["ipv4", "ihl"]
-            },
-            {
-              "type" : "expression",
-              "value" : {
-                "type" : "expression",
-                "value" : {
-                  "op" : "&",
-                  "left" : {
-                    "type" : "expression",
-                    "value" : {
-                      "op" : "+",
-                      "left" : {
-                        "type" : "field",
-                        "value" : ["ipv4", "ihl"]
-                      },
-                      "right" : {
-                        "type" : "hexstr",
-                        "value" : "0x01"
-                      }
-                    }
-                  },
-                  "right" : {
-                    "type" : "hexstr",
-                    "value" : "0x0f"
-                  }
-                }
-              }
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 214,
-            "column" : 8,
-            "source_fragment" : "hdr.ipv4.ihl = hdr.ipv4.ihl + 1"
-          }
-        },
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["ipv4", "totalLen"]
-            },
-            {
-              "type" : "expression",
-              "value" : {
-                "type" : "expression",
-                "value" : {
-                  "op" : "&",
-                  "left" : {
-                    "type" : "expression",
-                    "value" : {
-                      "op" : "+",
-                      "left" : {
-                        "type" : "field",
-                        "value" : ["ipv4", "totalLen"]
-                      },
-                      "right" : {
-                        "type" : "hexstr",
-                        "value" : "0x0004"
-                      }
-                    }
-                  },
-                  "right" : {
-                    "type" : "hexstr",
-                    "value" : "0xffff"
-                  }
-                }
-              }
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 215,
-            "column" : 8,
-            "source_fragment" : "hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4"
-          }
-        },
-        {
-          "op" : "assign",
-          "parameters" : [
-            {
-              "type" : "field",
-              "value" : ["ipv4_option", "optionLength"]
-            },
-            {
-              "type" : "expression",
-              "value" : {
-                "type" : "expression",
-                "value" : {
-                  "op" : "&",
-                  "left" : {
-                    "type" : "expression",
-                    "value" : {
-                      "op" : "+",
-                      "left" : {
-                        "type" : "field",
-                        "value" : ["ipv4_option", "optionLength"]
-                      },
-                      "right" : {
-                        "type" : "hexstr",
-                        "value" : "0x04"
-                      }
-                    }
-                  },
-                  "right" : {
-                    "type" : "hexstr",
-                    "value" : "0xff"
-                  }
-                }
-              }
-            }
-          ],
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 216,
-            "column" : 8,
-            "source_fragment" : "hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4"
+            "source_fragment" : "local_metadata.is_multicast = true"
           }
         }
       ]
@@ -950,7 +257,7 @@
       "id" : 0,
       "source_info" : {
         "filename" : "p4src/main.p4",
-        "line" : 163,
+        "line" : 71,
         "column" : 8,
         "source_fragment" : "IngressPipeImpl"
       },
@@ -961,7 +268,7 @@
           "id" : 0,
           "source_info" : {
             "filename" : "p4src/main.p4",
-            "line" : 177,
+            "line" : 95,
             "column" : 10,
             "source_fragment" : "l2_exact_table"
           },
@@ -979,11 +286,12 @@
           "with_counters" : false,
           "support_timeout" : false,
           "direct_meters" : null,
-          "action_ids" : [1, 0],
-          "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.drop"],
+          "action_ids" : [1, 2, 0],
+          "actions" : ["IngressPipeImpl.set_egress_port", "IngressPipeImpl.set_multicast_group", "IngressPipeImpl.drop"],
           "base_default_next" : null,
           "next_tables" : {
             "IngressPipeImpl.set_egress_port" : null,
+            "IngressPipeImpl.set_multicast_group" : null,
             "IngressPipeImpl.drop" : null
           },
           "default_entry" : {
@@ -1002,99 +310,17 @@
       "id" : 1,
       "source_info" : {
         "filename" : "p4src/main.p4",
-        "line" : 197,
+        "line" : 116,
         "column" : 8,
         "source_fragment" : "EgressPipeImpl"
       },
-      "init_table" : "node_5",
-      "tables" : [
-        {
-          "name" : "EgressPipeImpl.int_table",
-          "id" : 1,
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 219,
-            "column" : 10,
-            "source_fragment" : "int_table"
-          },
-          "key" : [],
-          "match_type" : "exact",
-          "type" : "simple",
-          "max_size" : 1024,
-          "with_counters" : false,
-          "support_timeout" : false,
-          "direct_meters" : null,
-          "action_ids" : [3, 2],
-          "actions" : ["EgressPipeImpl.add_int_header", "NoAction"],
-          "base_default_next" : null,
-          "next_tables" : {
-            "EgressPipeImpl.add_int_header" : null,
-            "NoAction" : null
-          },
-          "default_entry" : {
-            "action_id" : 2,
-            "action_const" : false,
-            "action_data" : [],
-            "action_entry_const" : false
-          }
-        }
-      ],
+      "init_table" : null,
+      "tables" : [],
       "action_profiles" : [],
-      "conditionals" : [
-        {
-          "name" : "node_5",
-          "id" : 0,
-          "source_info" : {
-            "filename" : "p4src/main.p4",
-            "line" : 228,
-            "column" : 12,
-            "source_fragment" : "hdr.int_count.isValid()"
-          },
-          "expression" : {
-            "type" : "expression",
-            "value" : {
-              "op" : "d2b",
-              "left" : null,
-              "right" : {
-                "type" : "field",
-                "value" : ["int_count", "$valid$"]
-              }
-            }
-          },
-          "false_next" : null,
-          "true_next" : "EgressPipeImpl.int_table"
-        }
-      ]
-    }
-  ],
-  "checksums" : [
-    {
-      "name" : "cksum",
-      "id" : 0,
-      "source_info" : {
-        "filename" : "p4src/main.p4",
-        "line" : 239,
-        "column" : 8,
-        "source_fragment" : "update_checksum( ..."
-      },
-      "target" : ["ipv4", "hdrChecksum"],
-      "type" : "generic",
-      "calculation" : "calc",
-      "verify" : false,
-      "update" : true,
-      "if_cond" : {
-        "type" : "expression",
-        "value" : {
-          "op" : "d2b",
-          "left" : null,
-          "right" : {
-            "type" : "field",
-            "value" : ["ipv4", "$valid$"]
-          }
-        }
-      }
+      "conditionals" : []
     }
   ],
+  "checksums" : [],
   "force_arith" : [],
   "extern_instances" : [],
   "field_aliases" : [
diff --git a/src/tests/hackfest3/p4/main.p4 b/src/tests/hackfest3/p4/main.p4
index 68d93fe947f090bc4e0ae1d09b315337a2e9aa0e..843eb0d580e362e74b25c768b1b01e750138637a 100644
--- a/src/tests/hackfest3/p4/main.p4
+++ b/src/tests/hackfest3/p4/main.p4
@@ -20,136 +20,47 @@
 
 typedef bit<9>   port_num_t;
 typedef bit<48>  mac_addr_t;
+typedef bit<16>  mcast_group_id_t;
 
 //------------------------------------------------------------------------------
 // HEADER DEFINITIONS
 //------------------------------------------------------------------------------
 
-#define MAX_INT_HEADERS 9
-
-const bit<16> TYPE_IPV4 = 0x800;
-const bit<5>  IPV4_OPTION_INT = 31;
-
-typedef bit<9>  egressSpec_t;
-typedef bit<48> macAddr_t;
-typedef bit<32> ip4Addr_t;
-
-typedef bit<13> switch_id_t;
-typedef bit<32> queue_depth_t;
-
 header ethernet_t {
-    macAddr_t dstAddr;
-    macAddr_t srcAddr;
-    bit<16>   etherType;
-}
-
-header ipv4_t {
-    bit<4>    version;
-    bit<4>    ihl;
-    bit<6>    dscp;
-    bit<2>    ecn;
-    bit<16>   totalLen;
-    bit<16>   identification;
-    bit<3>    flags;
-    bit<13>   fragOffset;
-    bit<8>    ttl;
-    bit<8>    protocol;
-    bit<16>   hdrChecksum;
-    ip4Addr_t srcAddr;
-    ip4Addr_t dstAddr;
-}
-
-header ipv4_option_t {
-    bit<1> copyFlag;
-    bit<2> optClass;
-    bit<5> option;
-    bit<8> optionLength;
+    mac_addr_t  dst_addr;
+    mac_addr_t  src_addr;
+    bit<16>     ether_type;
 }
 
-header int_count_t {
-    bit<16>   num_switches;
-}
-
-header int_header_t {
-    queue_depth_t queue_depth;
-}
-
-
-struct parser_metadata_t {
-    bit<16> num_headers_remaining;
+struct parsed_headers_t {
+    ethernet_t  ethernet;
 }
 
 struct local_metadata_t {
-    parser_metadata_t  parser_metadata;
-}
-
-struct parsed_headers_t {
-    ethernet_t   ethernet;
-    ipv4_t       ipv4;
-    ipv4_option_t ipv4_option;
-    int_count_t   int_count;
-    int_header_t[MAX_INT_HEADERS] int_headers;
+    bool        is_multicast;
 }
 
-error { IPHeaderWithoutOptions }
 
 //------------------------------------------------------------------------------
 // INGRESS PIPELINE
 //------------------------------------------------------------------------------
 
-parser ParserImpl(packet_in packet,
-                out parsed_headers_t hdr,
-                inout local_metadata_t local_metadata,
-                inout standard_metadata_t standard_metadata) {
-
+parser ParserImpl (packet_in packet,
+                   out parsed_headers_t hdr,
+                   inout local_metadata_t local_metadata,
+                   inout standard_metadata_t standard_metadata)
+{
     state start {
-
-        packet.extract(hdr.ethernet);
-        transition select(hdr.ethernet.etherType){
-            TYPE_IPV4: parse_ipv4;
-            default: accept;
-        }
+      transition parse_ethernet;
     }
 
-    state parse_ipv4 {
-        packet.extract(hdr.ipv4);
-        //Check if ihl is bigger than 5. Packets without ip options set ihl to 5.
-        verify(hdr.ipv4.ihl >= 5, error.IPHeaderWithoutOptions);
-        transition select(hdr.ipv4.ihl) {
-            5             : accept;
-            default       : parse_ipv4_option;
-        }
-    }
-
-    state parse_ipv4_option {
-        packet.extract(hdr.ipv4_option);
-        transition select(hdr.ipv4_option.option){
-
-            IPV4_OPTION_INT:  parse_int;
-            default: accept;
-
-        }
-     }
-
-    state parse_int {
-        packet.extract(hdr.int_count);
-        local_metadata.parser_metadata.num_headers_remaining = hdr.int_count.num_switches;
-        transition select(local_metadata.parser_metadata.num_headers_remaining){
-            0: accept;
-            default: parse_int_headers;
-        }
-    }
-
-    state parse_int_headers {
-        packet.extract(hdr.int_headers.next);
-        local_metadata.parser_metadata.num_headers_remaining = local_metadata.parser_metadata.num_headers_remaining -1 ;
-        transition select(local_metadata.parser_metadata.num_headers_remaining){
-            0: accept;
-            default: parse_int_headers;
-        }
+    state parse_ethernet {
+        packet.extract(hdr.ethernet);
+        transition accept;
     }
 }
 
+
 control VerifyChecksumImpl(inout parsed_headers_t hdr,
                            inout local_metadata_t meta)
 {
@@ -161,6 +72,7 @@ control IngressPipeImpl (inout parsed_headers_t    hdr,
                          inout local_metadata_t    local_metadata,
                          inout standard_metadata_t standard_metadata) {
 
+    // Drop action shared by many tables.
     action drop() {
         mark_to_drop(standard_metadata);
     }
@@ -169,6 +81,15 @@ control IngressPipeImpl (inout parsed_headers_t    hdr,
         standard_metadata.egress_spec = port;
     }
 
+    action set_multicast_group(mcast_group_id_t gid) {
+        // gid will be used by the Packet Replication Engine (PRE) in the
+        // Traffic Manager--located right after the ingress pipeline, to
+        // replicate a packet to multiple egress ports, specified by the control
+        // plane by means of P4Runtime MulticastGroupEntry messages.
+        standard_metadata.mcast_grp = gid;
+        local_metadata.is_multicast = true;
+    }
+
     // --- l2_exact_table ------------------
 
     table l2_exact_table {
@@ -177,6 +98,7 @@ control IngressPipeImpl (inout parsed_headers_t    hdr,
         }
         actions = {
             set_egress_port;
+            set_multicast_group;
             @defaultonly drop;
         }
         const default_action = drop;
@@ -194,78 +116,24 @@ control IngressPipeImpl (inout parsed_headers_t    hdr,
 control EgressPipeImpl (inout parsed_headers_t hdr,
                         inout local_metadata_t local_metadata,
                         inout standard_metadata_t standard_metadata) {
-
-    
-    action add_int_header(switch_id_t swid){
-        //increase int stack counter by one
-        hdr.int_count.num_switches = hdr.int_count.num_switches + 1;
-        hdr.int_headers.push_front(1);
-        // This was not needed in older specs. Now by default pushed
-        // invalid elements are
-        hdr.int_headers[0].setValid();
-        hdr.int_headers[0].queue_depth = (bit<32>)standard_metadata.ingress_global_timestamp;
-
-        //update ip header length
-        hdr.ipv4.ihl = hdr.ipv4.ihl + 1;
-        hdr.ipv4.totalLen = hdr.ipv4.totalLen + 4;
-        hdr.ipv4_option.optionLength = hdr.ipv4_option.optionLength + 4;
-    }
-
-    table int_table {
-        key = {
-            standard_metadata.ingress_port: exact;
-        }
-        actions = {
-            add_int_header;
-            NoAction;
-        }
-        default_action = NoAction;
-    }
-
-    apply {
-        if (hdr.int_count.isValid()){
-            int_table.apply();
-        }
-    }
+    apply { /* EMPTY */ }
 }
 
 
 control ComputeChecksumImpl(inout parsed_headers_t hdr,
                             inout local_metadata_t local_metadata)
 {
-    apply {
-        update_checksum(
-	          hdr.ipv4.isValid(),
-            { hdr.ipv4.version,
-	            hdr.ipv4.ihl,
-              hdr.ipv4.dscp,
-              hdr.ipv4.ecn,
-              hdr.ipv4.totalLen,
-              hdr.ipv4.identification,
-              hdr.ipv4.flags,
-              hdr.ipv4.fragOffset,
-              hdr.ipv4.ttl,
-              hdr.ipv4.protocol,
-              hdr.ipv4.srcAddr,
-              hdr.ipv4.dstAddr },
-            hdr.ipv4.hdrChecksum,
-            HashAlgorithm.csum16);
-    }
+    apply { /* EMPTY */ }
 }
 
+
 control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
     apply {
-
-        //parsed headers have to be added again into the packet.
         packet.emit(hdr.ethernet);
-        packet.emit(hdr.ipv4);
-        packet.emit(hdr.ipv4_option);
-        packet.emit(hdr.int_count);
-        packet.emit(hdr.int_headers);
-
     }
 }
 
+
 V1Switch(
     ParserImpl(),
     VerifyChecksumImpl(),
diff --git a/src/tests/hackfest3/p4/p4info.txt b/src/tests/hackfest3/p4/p4info.txt
index 42052049e1fd22b4659138ceab4123b0b6e64126..0b58e740864b72e6ca87582431cd7bd57894d0dd 100644
--- a/src/tests/hackfest3/p4/p4info.txt
+++ b/src/tests/hackfest3/p4/p4info.txt
@@ -16,6 +16,9 @@ tables {
   action_refs {
     id: 16812802
   }
+  action_refs {
+    id: 16841371
+  }
   action_refs {
     id: 16796182
     annotations: "@defaultonly"
@@ -24,20 +27,6 @@ tables {
   const_default_action_id: 16796182
   size: 1024
 }
-tables {
-  preamble {
-    id: 33578840
-    name: "EgressPipeImpl.int_table"
-    alias: "int_table"
-  }
-  action_refs {
-    id: 16827240
-  }
-  action_refs {
-    id: 16800567
-  }
-  size: 1024
-}
 actions {
   preamble {
     id: 16796182
@@ -59,21 +48,14 @@ actions {
 }
 actions {
   preamble {
-    id: 16800567
-    name: "NoAction"
-    alias: "NoAction"
-  }
-}
-actions {
-  preamble {
-    id: 16827240
-    name: "EgressPipeImpl.add_int_header"
-    alias: "add_int_header"
+    id: 16841371
+    name: "IngressPipeImpl.set_multicast_group"
+    alias: "set_multicast_group"
   }
   params {
     id: 1
-    name: "swid"
-    bitwidth: 13
+    name: "gid"
+    bitwidth: 16
   }
 }
 type_info {