diff --git a/backends/nxp/backend/ir/converter/node_converter.py b/backends/nxp/backend/ir/converter/node_converter.py index 623ba97ba73..fda62371123 100755 --- a/backends/nxp/backend/ir/converter/node_converter.py +++ b/backends/nxp/backend/ir/converter/node_converter.py @@ -181,6 +181,33 @@ def _has_shared_q_params_if_quantized(node: Node) -> bool: # Node not quantized return True + @staticmethod + def is_node_alone_in_partition( + node: Node, partition_list: list[Partition], filter_fn: Callable[[Node], bool] + ) -> bool: + """Return True if `node` is the only node in its partition for which `filter_fn` + returns True. + + The function finds the unique partition containing `node` and applies + `filter_fn` to all nodes in that partition. If only one node passes the + predicate — and that node is `node` — the function returns True. + + :param node: The torch.fx.Node to check. + :param partition_list: List of proposed partitions. + :param filter_fn: Predicate applied to nodes in the partition. + `node` is considered alone if it is the only node + for which this predicate returns True. + """ + partitions = [p for p in partition_list if node in p.nodes] + if len(partitions) != 1: + raise ValueError("Cannot find a partition of a node in graph. This should not occur.") + + partition = partitions[0] + filtered_partition_nodes = list(filter(filter_fn, partition.nodes)) + return ( + len(filtered_partition_nodes) == 1 and filtered_partition_nodes[0] == node + ) + def assert_convertible(self, node): """Assert that the call `is_supported()` returns `True`. Otherwise, raise an exception and print an error message. 
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/clamp_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/clamp_converter.py index 82347e38e8a..301000394c6 100644 --- a/backends/nxp/backend/ir/converter/node_converters/ops_converters/clamp_converter.py +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/clamp_converter.py @@ -76,6 +76,9 @@ def supports_partitioning_result( ) -> bool: bounds = cls._get_clamp_bounds(node) + # Neutron cannot delegate a partition where ReLU or ReLU6 is the only operator + # and at the same time the node does not satisfy delegation requirements. + # In contrast, ReLUN1To1 and ReLU0To1 are supported and delegated successfully. if bounds in [cls.SUPPORTED_BOUNDS["Relu"], cls.SUPPORTED_BOUNDS["Relu6"]]: # If this is the only operator in the partition, NeutronConverter will not create a NeutronNode for some # reason. diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/hardtanh_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/hardtanh_converter.py index 14d69ed42fb..00bd245b997 100644 --- a/backends/nxp/backend/ir/converter/node_converters/ops_converters/hardtanh_converter.py +++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/hardtanh_converter.py @@ -24,6 +24,37 @@ class HardTanhConverter(NodeConverter): (0.0, float("inf")): BuiltinOperator.RELU, } + # Maps possible modes of HardTanh to equivalent ReLU bounds. 
+ SUPPORTED_BOUNDS_MAP = { + "ReluN1To1": (-1.0, 1.0), + "Relu0To1": (0.0, 1.0), + "Relu6": (0.0, 6.0), + "Relu": (0.0, float("inf")), + } + + @staticmethod + def _get_hardtanh_bounds(node: Node) -> tuple[float, float]: + args = node.args + + match len(args): + case 1: + min_val = -1 + max_val = 1 + + case 2: + min_val = args[1] + max_val = 1 + + case 3: + min_val = args[1] + max_val = args[2] + + case _: + # should not occur + raise ValueError(f"Unexpected number of arguments for HardTanh node: {len(args)}") + + return min_val, max_val + @staticmethod def _is_supported_in_IR( node: Node, @@ -32,9 +63,35 @@ ) -> bool: - _, min_value, max_value = node.args - return (min_value, max_value) in HardTanhConverter.supported_modes_map.keys() + bounds = HardTanhConverter._get_hardtanh_bounds(node) + return bounds in HardTanhConverter.supported_modes_map + + @classmethod + def supports_partitioning_result( + cls, + node: Node, + partition_list: list[Partition], + custom_delegation_options: CustomDelegationOptions, + neutron_target_spec: NeutronTargetSpec, + parameters_mapping: dict[str, Parameter], + ) -> bool: + bounds = HardTanhConverter._get_hardtanh_bounds(node) + + # Neutron cannot delegate a partition where ReLU or ReLU6 is the only operator + # and at the same time the node does not satisfy delegation requirements. + # In contrast, ReLUN1To1 and ReLU0To1 are supported and delegated successfully. 
+ if bounds in [ + cls.SUPPORTED_BOUNDS_MAP["Relu"], + cls.SUPPORTED_BOUNDS_MAP["Relu6"], + ]: + is_alone_in_partition = cls.is_node_alone_in_partition( + node, partition_list, filter_fn=is_not_qdq_node + ) + if is_alone_in_partition: + return activation_supported_on_target(node, neutron_target_spec) + + return True def convert(self, node: Node): - """Convert 'aten::hardtanh' to it's supported ReLU equivalent.""" + """Convert 'aten::hardtanh' to its supported ReLU equivalent.""" self.assert_convertible(node) t_op = self._create_tflite_op_with_io_tensors(node) diff --git a/backends/nxp/tests/ir/converter/node_converter/test_hardtanh_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_hardtanh_converter.py index fb272a2c650..120253cf0c2 100644 --- a/backends/nxp/tests/ir/converter/node_converter/test_hardtanh_converter.py +++ b/backends/nxp/tests/ir/converter/node_converter/test_hardtanh_converter.py @@ -72,7 +72,7 @@ def test_relu6_quant(mocker, input_shape: tuple[int], inplace: bool, use_qat: bo def test_custom_hardtanh_quant( mocker, input_shape: tuple[int], - activation_range: tuple[int, int], + activation_range: tuple[float, float], inplace: bool, use_qat: bool, ): @@ -105,3 +105,34 @@ def test_custom_hardtanh_quant( input_data=input_data, atol=2.0, ) + + +@pytest.mark.parametrize( + "input_shape, activation_range", + [ + pytest.param( + (3, 7, 15, 7), + (0, float("inf")), + id="activation range: Relu, num_channels not divisible by NUM_MACS, alone in partition", + ), + pytest.param( + (3, 7, 15, 7), + (0, 6), + id="activation range: Relu6, num_channels not divisible by NUM_MACS, alone in partition", + ), + ], +) +def test_hardtanh__unsupported( + input_shape: tuple[int], + activation_range: tuple[float, float], + use_qat: bool, +): + min_val, max_val = activation_range + model = HardTanhModule(min_val, max_val) + delegated_ep = to_quantized_edge_program( + model, input_shape, use_qat=use_qat + ).exported_program() + + # Make sure the `hardtanh` 
was NOT delegated. + assert not graph_contains_any_of_ops(delegated_ep.graph, [ExecutorchDelegateCall]) + assert graph_contains_any_of_ops(delegated_ep.graph, [HardTanh, HardTanh_])