
Commit 6f4fab8

RahulC7 authored and facebook-github-bot committed
Adding Test for CadenceWithLayerNormQuantizer
Summary: Adding test for CadenceWithLayerNormQuantizer.
https://www.internalfb.com/code/fbsource/[72237310dfd9016b5f54a44d994c8a7065eda5d1]/fbcode/executorch/backends/cadence/aot/quantizer/quantizer.py?lines=296-306

Differential Revision: D88898823
1 parent 9909402 commit 6f4fab8
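For context, the graph built by the new _build_layer_norm_graph helper contains a single torch.ops.aten.layer_norm.default call on a (1, 10) input with normalized_shape=[10] and no explicit weight/bias. Below is a minimal eager-mode sketch of that op for readers unfamiliar with it; the variable names are illustrative and not part of this change.

import torch
import torch.nn.functional as F

# Same shapes as the test helper: a (batch, features) input,
# normalized over its last dimension.
x = torch.randn(1, 10)
normalized_shape = [10]

# The ATen overload the test graph calls; weight and bias default to None.
out = torch.ops.aten.layer_norm.default(x, normalized_shape)

# Equivalent functional form, shown only as a reference point.
torch.testing.assert_close(out, F.layer_norm(x, normalized_shape))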

1 file changed: 35 additions, 1 deletion

backends/cadence/aot/tests/test_quantizer_ops.py

Lines changed: 35 additions & 1 deletion
@@ -56,7 +56,6 @@
     CadenceW8A32MixedQuantizer,  # TODO: T247438158 Add test coverage
     CadenceRmsNormNopQuantizer,  # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition
     CadenceWakeWordQuantizer,  # TODO: T247438162 Add test coverage
-    CadenceWithLayerNormQuantizer,  # TODO: T247438410 Add test coverage
 }
 
 
@@ -118,6 +117,15 @@
         # For softmax: only input_activation
         [qconfig_A16.input_activation],
     ),
+    (
+        "layer_norm_A8W8",
+        lambda self: self._build_layer_norm_graph(),
+        CadenceWithLayerNormQuantizer(),
+        torch.ops.aten.layer_norm.default,
+        qconfig_A8W8.output_activation,
+        # For layer_norm: only input_activation (weights/bias are passed as others)
+        [qconfig_A8W8.input_activation],
+    ),
 ]
 
 # Derive the set of tested quantizer classes from the test cases.
@@ -243,6 +251,32 @@ def _build_softmax_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
         self.assertEqual(len(softmax_nodes), 1, "Should find exactly one softmax node")
         return gm, softmax_nodes[0]
 
+    def _build_layer_norm_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
+        """Build a simple graph with a layer_norm operation."""
+        builder = GraphBuilder()
+        # Input shape: (batch, features)
+        x = builder.placeholder("x", torch.randn(1, 10))
+        # normalized_shape must match the last dimension(s) of input
+        normalized_shape = [10]
+        layer_norm = builder.call_operator(
+            op=torch.ops.aten.layer_norm.default,
+            args=(x, normalized_shape),
+            meta=NodeMetadata(
+                {"source_fn_stack": [("layer_norm", torch.ops.aten.layer_norm.default)]}
+            ),
+        )
+        builder.output([layer_norm])
+        gm = builder.get_graph_module()
+
+        layer_norm_nodes = gm.graph.find_nodes(
+            op="call_function",
+            target=torch.ops.aten.layer_norm.default,
+        )
+        self.assertEqual(
+            len(layer_norm_nodes), 1, "Should find exactly one layer_norm node"
+        )
+        return gm, layer_norm_nodes[0]
+
     @parameterized.expand(QUANTIZER_ANNOTATION_TEST_CASES)
     def test_quantizer_annotation(
         self,
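The diff adds only the graph-builder helper and the parameterized test case; the shared test_quantizer_annotation body (unchanged, not shown above) performs the actual annotation checks. As a rough sketch of what exercising the quantizer outside the test harness could look like, assuming CadenceWithLayerNormQuantizer follows the standard PT2E Quantizer.annotate interface and writes the usual quantization_annotation node meta (illustrative only, not code from this file):

import torch
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceWithLayerNormQuantizer,
)


def annotate_and_check(gm: torch.fx.GraphModule) -> None:
    # Run the quantizer's annotation pass over a graph such as the one
    # produced by _build_layer_norm_graph.
    quantizer = CadenceWithLayerNormQuantizer()
    quantizer.annotate(gm)

    # The lone layer_norm node should now carry a quantization annotation;
    # weight and bias are treated as "others" rather than annotated inputs.
    (node,) = gm.graph.find_nodes(
        op="call_function", target=torch.ops.aten.layer_norm.default
    )
    assert node.meta.get("quantization_annotation") is not None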
