|
56 | 56 | CadenceW8A32MixedQuantizer, # TODO: T247438158 Add test coverage |
57 | 57 | CadenceRmsNormNopQuantizer, # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition |
58 | 58 | CadenceWakeWordQuantizer, # TODO: T247438162 Add test coverage |
59 | | - CadenceWithLayerNormQuantizer, # TODO: T247438410 Add test coverage |
60 | 59 | } |
61 | 60 |
62 | 61 |

118 | 117 | # For softmax: only input_activation |
119 | 118 | [qconfig_A16.input_activation], |
120 | 119 | ), |
| 120 | + ( |
| 121 | + "layer_norm_A8W8", |
| 122 | + lambda self: self._build_layer_norm_graph(), |
| 123 | + CadenceWithLayerNormQuantizer(), |
| 124 | + torch.ops.aten.layer_norm.default, |
| 125 | + qconfig_A8W8.output_activation, |
| 126 | + # For layer_norm: only input_activation (weights/bias are passed as others) |
| 127 | + [qconfig_A8W8.input_activation], |
| 128 | + ), |
121 | 129 | ] |
122 | 130 |
123 | 131 | # Derive the set of tested quantizer classes from the test cases. |
@@ -243,6 +251,32 @@ def _build_softmax_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]: |
243 | 251 | self.assertEqual(len(softmax_nodes), 1, "Should find exactly one softmax node") |
244 | 252 | return gm, softmax_nodes[0] |
245 | 253 |
| 254 | + def _build_layer_norm_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]: |
| 255 | + """Build a simple graph with a layer_norm operation.""" |
| 256 | + builder = GraphBuilder() |
| 257 | + # Input shape: (batch, features) |
| 258 | + x = builder.placeholder("x", torch.randn(1, 10)) |
| 259 | + # normalized_shape must match the last dimension(s) of input |
| 260 | + normalized_shape = [10] |
| 261 | + layer_norm = builder.call_operator( |
| 262 | + op=torch.ops.aten.layer_norm.default, |
| 263 | + args=(x, normalized_shape), |
| 264 | + meta=NodeMetadata( |
| 265 | + {"source_fn_stack": [("layer_norm", torch.ops.aten.layer_norm.default)]} |
| 266 | + ), |
| 267 | + ) |
| 268 | + builder.output([layer_norm]) |
| 269 | + gm = builder.get_graph_module() |
| 270 | + |
| 271 | + layer_norm_nodes = gm.graph.find_nodes( |
| 272 | + op="call_function", |
| 273 | + target=torch.ops.aten.layer_norm.default, |
| 274 | + ) |
| 275 | + self.assertEqual( |
| 276 | + len(layer_norm_nodes), 1, "Should find exactly one layer_norm node" |
| 277 | + ) |
| 278 | + return gm, layer_norm_nodes[0] |
| 279 | + |
246 | 280 | @parameterized.expand(QUANTIZER_ANNOTATION_TEST_CASES) |
247 | 281 | def test_quantizer_annotation( |
248 | 282 | self, |
|
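Note (editorial sketch, not part of the diff): the _build_layer_norm_graph helper above calls aten.layer_norm.default with only (x, normalized_shape). That works because the overload's weight and bias arguments are optional and default to None, which is consistent with the new test case expecting only an input_activation annotation. A minimal standalone check using only stock PyTorch (assuming a recent version where these defaults hold):

import torch

# aten.layer_norm.default: (input, normalized_shape, weight=None, bias=None, eps=1e-05, cudnn_enable=True)
x = torch.randn(1, 10)  # (batch, features), as in the graph builder
out = torch.ops.aten.layer_norm.default(x, [10])  # normalized_shape matches the last dim
assert out.shape == (1, 10)

This mirrors the args=(x, normalized_shape) call recorded by GraphBuilder in the diff.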