
Commit 71aabe7

RahulC7 authored and facebook-github-bot committed
Adding Test for CadenceWithLayerNormQuantizer (#16355)
Summary: Adding test for CadenceWithLayerNormQuantizer. See https://www.internalfb.com/code/fbsource/[72237310dfd9016b5f54a44d994c8a7065eda5d1]/fbcode/executorch/backends/cadence/aot/quantizer/quantizer.py?lines=296-306

Reviewed By: hsharma35

Differential Revision: D88898823
1 parent 200d5f6 commit 71aabe7
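For context, here is a minimal sketch (not part of this commit) of how a Cadence quantizer such as CadenceWithLayerNormQuantizer is typically exercised end to end through the PT2E flow. The ToyLayerNorm module, the example input shape, and the torch.export.export / torch.ao.quantization.quantize_pt2e entry points are assumptions; newer stacks may route through export_for_training or the torchao equivalents instead.

```python
# Hedged sketch: exercising CadenceWithLayerNormQuantizer via the standard
# PT2E prepare/convert flow. ToyLayerNorm and the export entry point are
# assumptions, not code from this commit.
import torch
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceWithLayerNormQuantizer,
)
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e


class ToyLayerNorm(torch.nn.Module):  # hypothetical toy module
    def __init__(self) -> None:
        super().__init__()
        self.ln = torch.nn.LayerNorm(10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.ln(x)


example_inputs = (torch.randn(1, 10),)
exported = torch.export.export(ToyLayerNorm().eval(), example_inputs).module()

quantizer = CadenceWithLayerNormQuantizer()
prepared = prepare_pt2e(exported, quantizer)  # annotate graph, insert observers
prepared(*example_inputs)                     # calibration pass
quantized = convert_pt2e(prepared)            # materialize quant/dequant ops
```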

File tree

1 file changed (+35, -2 lines)


backends/cadence/aot/tests/test_quantizer_ops.py

Lines changed: 35 additions & 2 deletions
@@ -11,7 +11,7 @@
 from typing import Callable
 
 import torch
-from executorch.backends.cadence.aot.graph_builder import GraphBuilder
+from executorch.backends.cadence.aot.graph_builder import GraphBuilder, single_op_builder
 from executorch.backends.cadence.aot.quantizer import quantizer as quantizer_module
 from executorch.backends.cadence.aot.quantizer.patterns import AddmmPattern
 from executorch.backends.cadence.aot.quantizer.quantizer import (
@@ -56,7 +56,6 @@
     CadenceW8A32MixedQuantizer,  # TODO: T247438158 Add test coverage
     CadenceRmsNormNopQuantizer,  # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition
     CadenceWakeWordQuantizer,  # TODO: T247438162 Add test coverage
-    CadenceWithLayerNormQuantizer,  # TODO: T247438410 Add test coverage
 }
 
 
@@ -118,6 +117,15 @@
         # For softmax: only input_activation
         [qconfig_A16.input_activation],
     ),
+    (
+        "layer_norm_A8W8",
+        lambda self: self._build_layer_norm_graph(),
+        CadenceWithLayerNormQuantizer(),
+        torch.ops.aten.layer_norm.default,
+        qconfig_A8W8.output_activation,
+        # For layer_norm: only input_activation (weights/bias are passed as others)
+        [qconfig_A8W8.input_activation],
+    ),
 ]
 
 # Derive the set of tested quantizer classes from the test cases.
@@ -243,6 +251,31 @@ def _build_softmax_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
         self.assertEqual(len(softmax_nodes), 1, "Should find exactly one softmax node")
         return gm, softmax_nodes[0]
 
+    def _build_layer_norm_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
+        """Build a simple graph with a layer_norm operation."""
+        # Input shape: (batch, features)
+        x = torch.randn(1, 10)
+        # normalized_shape must match the last dimension(s) of input
+        normalized_shape = [10]
+        gm = single_op_builder(
+            placeholders=(x,),
+            op=torch.ops.aten.layer_norm.default,
+            args=(x, normalized_shape),
+        )
+
+        layer_norm_nodes = gm.graph.find_nodes(
+            op="call_function",
+            target=torch.ops.aten.layer_norm.default,
+        )
+        self.assertEqual(
+            len(layer_norm_nodes), 1, "Should find exactly one layer_norm node"
+        )
+        # Add source_fn_stack metadata required by quantizer pattern matching
+        layer_norm_nodes[0].meta["source_fn_stack"] = [
+            ("layer_norm", torch.ops.aten.layer_norm.default)
+        ]
+        return gm, layer_norm_nodes[0]
+
     @parameterized.expand(QUANTIZER_ANNOTATION_TEST_CASES)
     def test_quantizer_annotation(
         self,
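As a side note on the new _build_layer_norm_graph helper, the sketch below (not from the commit) shows what aten.layer_norm.default computes when only (x, normalized_shape) is passed: it normalizes without affine weight/bias, consistent with the test case above, which expects a quantization spec for the input activation only and treats weight/bias as "others" per the inline comment.

```python
# Standalone check of the op used by _build_layer_norm_graph. The aten overload
# defaults weight/bias to None, so (x, normalized_shape) alone is a valid call.
import torch

x = torch.randn(1, 10)
normalized_shape = [10]

out_aten = torch.ops.aten.layer_norm.default(x, normalized_shape)
out_ref = torch.nn.functional.layer_norm(x, normalized_shape)

# Both paths compute the same normalization.
assert torch.allclose(out_aten, out_ref)
# Each row ends up with roughly zero mean and unit (biased) variance.
print(out_aten.mean(dim=-1), out_aten.var(dim=-1, unbiased=False))
```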
