
Commit 82f3381

Arm backend: Don't handle const placeholders in match_arg_ranks_pass (#9134)
Previously, this pass handled constant placeholders directly by modifying the data in the state_dict, whereas call_functions were handled by inserting view ops. Since the introduction of the FuseConstantOpsPass, however, ops acting on constant placeholders are computed ahead of time (AOT) and fused into a single node. All nodes can therefore now be handled the same way here, by inserting view nodes, which removes the need for a special case in this pass. Signed-off-by: Martin Lindström <[email protected]>
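
To make the rule concrete, here is a minimal sketch of the rank matching that the inserted view nodes perform, written as plain PyTorch. The match_rank helper is hypothetical, for illustration only; it is not part of the pass.

import torch

def match_rank(t: torch.Tensor, max_rank: int) -> torch.Tensor:
    # Prepend singleton dimensions until the tensor reaches max_rank,
    # mirroring the effect of the view nodes the pass inserts.
    return t.view([1] * (max_rank - t.dim()) + list(t.shape))

a = torch.randn(4, 3, 2)
b = torch.randn(2)
c = torch.randn(3, 1)
max_rank = max(x.dim() for x in (a, b, c))
print(match_rank(b, max_rank).shape)  # torch.Size([1, 1, 2])
print(match_rank(c, max_rank).shape)  # torch.Size([1, 3, 1])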
1 parent 206a4df commit 82f3381

File tree

1 file changed (+13 -36 lines)

backends/arm/_passes/match_arg_ranks_pass.py

+13 -36
@@ -1,6 +1,6 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
-# Copyright 2024 Arm Limited and/or its affiliates.
 # All rights reserved.
+# Copyright 2024-2025 Arm Limited and/or its affiliates.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -23,7 +23,17 @@
 class MatchArgRanksPass(ExportPass):
     """
     For ops in 'targeted_ops', make sure that the inputs share the same rank.
-    New dimensions are inserted at from the beginning of the
+    New dimensions are inserted from the beginning of the inputs that have a
+    lower rank to match the input with the highest rank.
+
+    Example:
+        input0 = shape(4, 3, 2)
+        input1 = shape(2)
+        input2 = shape(3, 1)
+    Becomes:
+        input0 = shape(4, 3, 2)
+        input1 = shape(1, 1, 2)
+        input2 = shape(1, 3, 1)
     """
 
     def __init__(self, exported_program):
@@ -54,34 +64,6 @@ def _match_op_rank(self, graph_module, node, arg, max_rank):
         )
         node.replace_input_with(arg, view)
 
-    def _match_buffer_rank(self, arg, max_rank):
-        """
-        Change arg's fake tensor meta to match max_rank if:
-            - arg is found in inputs_to_buffers or inputs_to_parameters.
-        """
-        fake_tensor = get_first_fake_tensor(arg)
-        shape = fake_tensor.shape
-        rank = len(shape)
-        new_shape = list([1] * (max_rank - rank) + list(shape))
-
-        buffer_name = None
-        if arg.name in self.exported_program.graph_signature.inputs_to_buffers:
-            buffer_name = self.exported_program.graph_signature.inputs_to_buffers[
-                arg.name
-            ]
-        elif arg.name in self.exported_program.graph_signature.inputs_to_parameters:
-            buffer_name = self.exported_program.graph_signature.inputs_to_parameters[
-                arg.name
-            ]
-        if buffer_name:
-            new_tensor = self.exported_program.state_dict[buffer_name].reshape(
-                new_shape
-            )
-            self.exported_program.state_dict[buffer_name] = new_tensor
-            arg.meta["val"] = fake_tensor.fake_mode.from_tensor(
-                new_tensor, static_shapes=True
-            )
-
     def call(self, graph_module: GraphModule) -> PassResult:
         for node in graph_module.graph.nodes:
             node = cast(Node, node)
@@ -105,12 +87,7 @@ def call(self, graph_module: GraphModule) -> PassResult:
                 if rank == max_rank:
                     continue
 
-                # If the argument is call_function, match shape by inserting view node.
-                if arg.op == "call_function":
-                    self._match_op_rank(graph_module, node, arg, max_rank)
-                else:
-                    # If the argument is a buffer or parameter, adjust shape by changing the fake tensor meta.
-                    self._match_buffer_rank(arg, max_rank)
+                self._match_op_rank(graph_module, node, arg, max_rank)
 
         graph_module.recompile()
         graph_module = super().call(graph_module).graph_module
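
For readers unfamiliar with the view-insertion approach that now handles every argument, the following standalone torch.fx sketch shows the same idea outside of ExecuTorch. The Add module and the hard-coded target shape (1, 1, 2) are assumptions made for the example; the real pass computes the shape from the argument ranks.

import operator

import torch
from torch.fx import symbolic_trace

class Add(torch.nn.Module):
    def forward(self, x, y):
        return x + y

gm = symbolic_trace(Add())
graph = gm.graph
for node in graph.nodes:
    if node.op == "call_function" and node.target is operator.add:
        _, y = node.args
        # Insert a view in front of the lower-rank argument, then rewire
        # the consumer to read from the view, as _match_op_rank does.
        with graph.inserting_before(node):
            view = graph.call_method("view", (y, (1, 1, 2)))
        node.replace_input_with(y, view)
gm.recompile()

print(gm(torch.randn(4, 3, 2), torch.randn(2)).shape)  # torch.Size([4, 3, 2])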
