@@ -56,7 +56,6 @@
     CadenceW8A32MixedQuantizer, # TODO: T247438158 Add test coverage
     CadenceRmsNormNopQuantizer, # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition
     CadenceWakeWordQuantizer, # TODO: T247438162 Add test coverage
-    CadenceWith16BitConvActivationsQuantizer, # TODO: T247438221 Add test coverage
     CadenceWithLayerNormQuantizer, # TODO: T247438410 Add test coverage
     CadenceWithSoftmaxQuantizer, # TODO: T247438418 Add test coverage
 }
@@ -93,6 +92,24 @@
         # For linear: [input_activation, weight]
         [qconfig_A16.input_activation, qconfig_A16.weight],
     ),
+    (
+        "conv1d_A16",
+        lambda self: self._build_conv1d_graph(),
+        CadenceWith16BitConvActivationsQuantizer(),
+        torch.ops.aten.conv1d.default,
+        qconfig_A16.output_activation,
+        # For conv1d: [input_activation, weight]
+        [qconfig_A16.input_activation, qconfig_A16.weight],
+    ),
+    (
+        "conv2d_A16",
+        lambda self: self._build_conv2d_graph(),
+        CadenceWith16BitConvActivationsQuantizer(),
+        torch.ops.aten.conv2d.default,
+        qconfig_A16.output_activation,
+        # For conv2d: [input_activation, weight]
+        [qconfig_A16.input_activation, qconfig_A16.weight],
+    ),
 ]

 # Derive the set of tested quantizer classes from the test cases.
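
For context, a minimal sketch of how the parameterized test might consume each tuple above. The real signature of test_quantizer_annotation is truncated in this excerpt, so the parameter names here are illustrative, and the quantization_annotation / input_qspec_map / output_qspec fields assume the standard pt2e QuantizationAnnotation layout rather than anything Cadence-specific:

    def test_quantizer_annotation(
        self,
        name: str,
        build_graph,             # lambda taking the test instance, returns (gm, node)
        quantizer,               # quantizer under test, e.g. CadenceWith16BitConvActivationsQuantizer()
        target_op,               # op whose annotation is checked (unused in this sketch)
        expected_output_qspec,
        expected_input_qspecs,
    ) -> None:
        # Build the graph and locate the node of interest.
        gm, node = build_graph(self)
        # Annotating attaches a QuantizationAnnotation to node.meta in pt2e-style quantizers.
        quantizer.annotate(gm)
        annotation = node.meta["quantization_annotation"]
        # The output spec and per-input specs should match the expectations in the tuple.
        self.assertEqual(annotation.output_qspec, expected_output_qspec)
        self.assertEqual(list(annotation.input_qspec_map.values()), expected_input_qspecs)
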
@@ -149,6 +166,54 @@ def _build_linear_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
         self.assertEqual(len(linear_nodes), 1, "Should find exactly one linear node")
         return gm, linear_nodes[0]

+    def _build_conv1d_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
+        """Build a simple graph with a conv1d operation (no bias)."""
+        builder = GraphBuilder()
+        # Input shape: (batch, in_channels, length)
+        x = builder.placeholder("x", torch.randn(1, 3, 10))
+        # Weight shape: (out_channels, in_channels, kernel_size)
+        weight = builder.placeholder("weight", torch.randn(6, 3, 3))
+        conv1d = builder.call_operator(
+            op=torch.ops.aten.conv1d.default,
+            args=(x, weight),
+            meta=NodeMetadata(
+                {"source_fn_stack": [("conv1d", torch.ops.aten.conv1d.default)]}
+            ),
+        )
+        builder.output([conv1d])
+        gm = builder.get_graph_module()
+
+        conv1d_nodes = gm.graph.find_nodes(
+            op="call_function",
+            target=torch.ops.aten.conv1d.default,
+        )
+        self.assertEqual(len(conv1d_nodes), 1, "Should find exactly one conv1d node")
+        return gm, conv1d_nodes[0]
+
+    def _build_conv2d_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
+        """Build a simple graph with a conv2d operation (no bias)."""
+        builder = GraphBuilder()
+        # Input shape: (batch, in_channels, height, width)
+        x = builder.placeholder("x", torch.randn(1, 3, 8, 8))
+        # Weight shape: (out_channels, in_channels, kernel_h, kernel_w)
+        weight = builder.placeholder("weight", torch.randn(6, 3, 3, 3))
+        conv2d = builder.call_operator(
+            op=torch.ops.aten.conv2d.default,
+            args=(x, weight),
+            meta=NodeMetadata(
+                {"source_fn_stack": [("conv2d", torch.ops.aten.conv2d.default)]}
+            ),
+        )
+        builder.output([conv2d])
+        gm = builder.get_graph_module()
+
+        conv2d_nodes = gm.graph.find_nodes(
+            op="call_function",
+            target=torch.ops.aten.conv2d.default,
+        )
+        self.assertEqual(len(conv2d_nodes), 1, "Should find exactly one conv2d node")
+        return gm, conv2d_nodes[0]
+
     @parameterized.expand(QUANTIZER_ANNOTATION_TEST_CASES)
     def test_quantizer_annotation(
         self,
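As a quick sanity check on the shapes used by the new graph builders (stride 1, no padding, so each output dimension is input size - kernel size + 1): conv1d maps a (1, 3, 10) input with a (6, 3, 3) weight to (1, 6, 8), and conv2d maps (1, 3, 8, 8) with (6, 3, 3, 3) to (1, 6, 6, 6). A standalone check with plain PyTorch:

    import torch

    x1 = torch.randn(1, 3, 10)    # (batch, in_channels, length)
    w1 = torch.randn(6, 3, 3)     # (out_channels, in_channels, kernel_size)
    print(torch.nn.functional.conv1d(x1, w1).shape)  # torch.Size([1, 6, 8])

    x2 = torch.randn(1, 3, 8, 8)  # (batch, in_channels, height, width)
    w2 = torch.randn(6, 3, 3, 3)  # (out_channels, in_channels, kernel_h, kernel_w)
    print(torch.nn.functional.conv2d(x2, w2).shape)  # torch.Size([1, 6, 6, 6])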