Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions backends/nxp/backend/ir/converter/node_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
# LICENSE file in the root directory of this source tree.

from abc import ABC, abstractmethod
from typing import Callable

import torch

Expand Down Expand Up @@ -181,6 +182,33 @@ def _has_shared_q_params_if_quantized(node: Node) -> bool:
# Node not quantized
return True

@staticmethod
def is_node_alone_in_partition(
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice!👏🏻

node: Node, partition_list: list[Partition], filter_fn: Callable
):
"""Return True if `node` is the only node in its partition for which `filter_fn`
returns True.

The function finds the unique partition containing `node` and applies
`filter_fn` to all nodes in that partition. If only one node passes the
predicate — and that node is `node` — the function returns True.

:param node: The torch.Node to check.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: torch.fx.Node

:param partition_list: List of proposed partitions.
:param filter_fn: Predicate applied to nodes in the partition.
`node` is considered alone if it is the only node
for which this predicate returns True.
"""
partitions = [p for p in partition_list if node in p.nodes]
if len(partitions) != 1:
return False # Should never happen
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

When this code was inside the supports_partitioning_result, returning False would result in not delegating the node. Here, it would have the opposite effect. Perhaps we should raise an exception here instead. What do you think?


partition = partitions[0]
filtered_partition_nodes = list(filter(filter_fn, partition.nodes))
return (
len(filtered_partition_nodes) == 1 and filtered_partition_nodes[0] == node
)
Comment on lines +185 to +210

def assert_convertible(self, node):
"""Assert that the call `is_supported()` returns `True`. Otherwise, raise an exception and print an
error message.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.fx.passes.infra.partitioner import Partition
Expand Down Expand Up @@ -77,18 +80,11 @@ def supports_partitioning_result(
bounds = cls._get_clamp_bounds(node)

if bounds in [cls.SUPPORTED_BOUNDS["Relu"], cls.SUPPORTED_BOUNDS["Relu6"]]:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: A comment explaining why only Relu and Relu6 are handled here would be useful.

# If this is the only operator in the partition, NeutronConverter will not create a NeutronNode for some
# reason.
clamp_partitions = [p for p in partition_list if node in p.nodes]
if len(clamp_partitions) != 1:
return False # Should never happen

clamp_partition = clamp_partitions[0]
non_q_dq_partition_nodes = list(
filter(is_not_qdq_node, clamp_partition.nodes)
is_alone_in_partition = cls.is_node_alone_in_partition(
node, partition_list, filter_fn=is_not_qdq_node
)
if len(non_q_dq_partition_nodes) <= 1:
return False # This would be the only node in the partition, which would cause a crash later on.
if is_alone_in_partition:
return activation_supported_on_target(node, neutron_target_spec)

return True

Expand Down
Original file line number Diff line number Diff line change
@@ -1,47 +1,108 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
is_not_qdq_node,
NodeConverter,
Partition,
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node
from torch.nn import Parameter


class HardTanhConverter(NodeConverter):

# Maps possible input parameters of HardTanh to equivalent ReLU-based operators supported by TFLite.
supported_modes_map = {
SUPPORTED_MODES_MAP = {
(0.0, 6.0): BuiltinOperator.RELU6,
(-1.0, 1.0): BuiltinOperator.RELU_N1_TO_1,
(0.0, 1.0): BuiltinOperator.RELU_0_TO_1,
(0.0, float("inf")): BuiltinOperator.RELU,
}

# Maps possible modes of HardTanh to equivalent ReLU bounds.
SUPPORTED_BOUNDS_MAP = {
"ReluN1To1": (-1.0, 1.0),
"Relu0To1": (0.0, 1.0),
"Relu6": (0.0, 6.0),
"Relu": (0.0, float("inf")),
}

@staticmethod
def _get_hardtanh_bounds(node: Node) -> tuple[int, int]:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: The return type should be tuple[float, float] as the bounds don't need to be strictly integers.

args = node.args

match len(args):
case 1:
min_val = -1
max_val = 1

case 2:
min_val = args[1]
max_val = 1

case 3:
min_val = args[1]
max_val = args[2]

case _:
# should not occur
min_val = 0
max_val = 1
Comment on lines +59 to +61

return min_val, max_val

@staticmethod
def _is_supported_in_IR(
node: Node,
parameters_mapping: dict[str, Parameter],
custom_delegation_options: CustomDelegationOptions,
) -> bool:
_, min_value, max_value = node.args
return (min_value, max_value) in HardTanhConverter.supported_modes_map.keys()
bounds = HardTanhConverter._get_hardtanh_bounds(node)
return bounds in HardTanhConverter.SUPPORTED_MODES_MAP.keys()
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: The .keys() is redundant.


@classmethod
def supports_partitioning_result(
cls,
node: Node,
partition_list: list[Partition],
custom_delegation_options: CustomDelegationOptions,
neutron_target_spec: NeutronTargetSpec,
parameters_mapping: dict[str, Parameter],
) -> bool:
bounds = HardTanhConverter._get_hardtanh_bounds(node)

if bounds in [
cls.SUPPORTED_BOUNDS_MAP["Relu"],
cls.SUPPORTED_BOUNDS_MAP["Relu6"],
]:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: A comment explaining why only Relu and Relu6 would be useful.

is_alone_in_partition = cls.is_node_alone_in_partition(
node, partition_list, filter_fn=is_not_qdq_node
)
if is_alone_in_partition:
return activation_supported_on_target(node, neutron_target_spec)

return True

def convert(self, node: Node):
"""Convert 'aten::hardtanh' to it's supported ReLU equivalent."""
self.assert_convertible(node)

t_op = self._create_tflite_op_with_io_tensors(node)

_, min_value, max_value = node.args
bounds = HardTanhConverter._get_hardtanh_bounds(node)

op = self.supported_modes_map[(min_value, max_value)]
op = self.SUPPORTED_MODES_MAP[bounds]
t_op.opcode_index = self.builder.op_code_index_for_op_type(op)

self.builder.append_operators([t_op])
Original file line number Diff line number Diff line change
@@ -1,15 +1,21 @@
# Copyright 2024-2025 NXP
# Copyright 2024-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.ir.converter.node_converter import (
CustomDelegationOptions,
is_not_qdq_node,
NodeConverter,
Partition,
)
from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
BuiltinOperator,
)
from executorch.backends.nxp.backend.neutron_operator_support import (
activation_supported_on_target,
NeutronTargetSpec,
)
from torch.fx import Node
from torch.nn import Parameter

Expand All @@ -24,6 +30,23 @@ def _is_supported_in_IR(
) -> bool:
return True

@classmethod
def supports_partitioning_result(
    cls,
    node: Node,
    partition_list: list[Partition],
    custom_delegation_options: CustomDelegationOptions,
    neutron_target_spec: NeutronTargetSpec,
    parameters_mapping: dict[str, Parameter],
) -> bool:
    """Decide whether the proposed partitioning result is supported for `node`.

    When `node` is the only node in its partition passing the `is_not_qdq_node`
    predicate, delegation additionally requires the target to support the
    activation directly; otherwise the partitioning result is accepted.
    """
    if not cls.is_node_alone_in_partition(
        node, partition_list, filter_fn=is_not_qdq_node
    ):
        return True

    return activation_supported_on_target(node, neutron_target_spec)

def convert(self, node: Node):
self.assert_convertible(node)

Expand Down
28 changes: 27 additions & 1 deletion backends/nxp/backend/neutron_operator_support.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
# Copyright 2025 NXP
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from executorch.backends.nxp.backend.data_format import NXP_NODE_FORMAT
from executorch.backends.nxp.backend.edge_helper import input_tensor
from executorch.backends.nxp.backend.ir.converter.conversion.translator import (
dims_to_channels_last,
)
from executorch.backends.nxp.backend.neutron_target_spec import NeutronTargetSpec
from torch.fx import Node


def is_tensor_invariant_permutation(
Expand Down Expand Up @@ -77,3 +83,23 @@ def transposition_is_supported_on_neutron(
return True

return False


def activation_supported_on_target(
    node: Node, neutron_target_spec: NeutronTargetSpec
) -> bool:
    """Determine whether the current NeutronSoftware properly supports the activation
    operator represented by the given node.

    :param node: The node representing the activation operator.
    :param neutron_target_spec: Object for querying the target platform to retrieve its properties.
    :return: True if the activation operator can be delegated to the target.
    """
    shape = list(input_tensor(node, 0).shape)
    if node.args[0].meta[NXP_NODE_FORMAT].is_channels_first():
        shape = dims_to_channels_last(shape)

    # Activations in Neutron are only delegable when the number of channels
    # (last dimension in channels-last layout) is divisible by `num_macs`.
    num_channels = shape[-1]
    return num_channels % neutron_target_spec.get_num_macs() == 0
Comment on lines +88 to +105
Original file line number Diff line number Diff line change
Expand Up @@ -100,21 +100,28 @@ def test_convert_clamp__supported(mocker, min, max):

# noinspection PyShadowingBuiltins
@pytest.mark.parametrize(
    "input_shape, min, max",
    [
        pytest.param(
            (1, 7, 9, 11),
            0,
            6,
            id="min = 0, max = 6 (Relu6), num_channels not divisible by NUM_MACS, alone in partition",
        ),
        pytest.param(
            (1, 7, 9, 11),
            0,
            None,
            id="min = 0, max = None (Relu), num_channels not divisible by NUM_MACS, alone in partition",
        ),
    ],
)
def test_convert_clamp__unsupported_shape(input_shape, min, max):
    # A Clamp representable as Relu6/Relu that is alone in its partition and whose
    # channel count is not divisible by the target's number of MACs must not be delegated.
    model = ClampModule(min, max)

    delegated_ep = to_quantized_edge_program(model, input_shape).exported_program()

    # Make sure the `clamp` was NOT delegated.
    assert not graph_contains_any_of_ops(delegated_ep.graph, [ExecutorchDelegateCall])
    assert graph_contains_any_of_ops(delegated_ep.graph, [Clamp])
Expand Down
Loading
Loading