
Commit d89c44a

RahulC7 authored and facebook-github-bot committed

Adding test for CadenceWith16BitConvActivationsQuantizer (#16205)

Summary: Add annotation tests for CadenceWith16BitConvActivationsQuantizer covering both conv1d and conv2d operations.
https://www.internalfb.com/code/fbsource/[01c566b03c670b1869136cbb64f25d16d730c8d4]/fbcode/executorch/backends/cadence/aot/quantizer/quantizer.py?lines=384-396

Reviewed By: hsharma35

Differential Revision: D88895865

1 parent 227ff2e
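
The body of the parameterized test is not part of this diff. The sketch below is a hypothetical illustration of how one of the new tuples (e.g. "conv1d_A16") could drive an annotation check; it assumes the quantizer exposes the standard Quantizer.annotate() entry point and that annotations land in node.meta["quantization_annotation"] with output_qspec / input_qspec_map fields, as in torch.ao.quantization.

    # Hypothetical sketch, not taken from the diff: shows how a test case
    # tuple such as ("conv1d_A16", ...) could be consumed by the annotation test.
    def check_conv1d_annotation(self) -> None:
        gm, conv_node = self._build_conv1d_graph()
        quantizer = CadenceWith16BitConvActivationsQuantizer()
        quantizer.annotate(gm)  # assumes the standard Quantizer.annotate() entry point

        annotation = conv_node.meta["quantization_annotation"]
        # The output activation should carry the A16 output qspec.
        self.assertEqual(annotation.output_qspec, qconfig_A16.output_activation)
        # Inputs are expected in order: [input_activation, weight] (dict insertion order).
        self.assertEqual(
            list(annotation.input_qspec_map.values()),
            [qconfig_A16.input_activation, qconfig_A16.weight],
        )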

File tree

1 file changed (+66, -1)


backends/cadence/aot/tests/test_quantizer_ops.py

Lines changed: 66 additions & 1 deletion
@@ -56,7 +56,6 @@
     CadenceW8A32MixedQuantizer,  # TODO: T247438158 Add test coverage
     CadenceRmsNormNopQuantizer,  # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition
     CadenceWakeWordQuantizer,  # TODO: T247438162 Add test coverage
-    CadenceWith16BitConvActivationsQuantizer,  # TODO: T247438221 Add test coverage
     CadenceWithLayerNormQuantizer,  # TODO: T247438410 Add test coverage
     CadenceWithSoftmaxQuantizer,  # TODO: T247438418 Add test coverage
 }
@@ -93,6 +92,24 @@
         # For linear: [input_activation, weight]
         [qconfig_A16.input_activation, qconfig_A16.weight],
     ),
+    (
+        "conv1d_A16",
+        lambda self: self._build_conv1d_graph(),
+        CadenceWith16BitConvActivationsQuantizer(),
+        torch.ops.aten.conv1d.default,
+        qconfig_A16.output_activation,
+        # For conv1d: [input_activation, weight]
+        [qconfig_A16.input_activation, qconfig_A16.weight],
+    ),
+    (
+        "conv2d_A16",
+        lambda self: self._build_conv2d_graph(),
+        CadenceWith16BitConvActivationsQuantizer(),
+        torch.ops.aten.conv2d.default,
+        qconfig_A16.output_activation,
+        # For conv2d: [input_activation, weight]
+        [qconfig_A16.input_activation, qconfig_A16.weight],
+    ),
 ]
 
 # Derive the set of tested quantizer classes from the test cases.
@@ -149,6 +166,54 @@ def _build_linear_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
         self.assertEqual(len(linear_nodes), 1, "Should find exactly one linear node")
         return gm, linear_nodes[0]
 
+    def _build_conv1d_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
+        """Build a simple graph with a conv1d operation (no bias)."""
+        builder = GraphBuilder()
+        # Input shape: (batch, in_channels, length)
+        x = builder.placeholder("x", torch.randn(1, 3, 10))
+        # Weight shape: (out_channels, in_channels, kernel_size)
+        weight = builder.placeholder("weight", torch.randn(6, 3, 3))
+        conv1d = builder.call_operator(
+            op=torch.ops.aten.conv1d.default,
+            args=(x, weight),
+            meta=NodeMetadata(
+                {"source_fn_stack": [("conv1d", torch.ops.aten.conv1d.default)]}
+            ),
+        )
+        builder.output([conv1d])
+        gm = builder.get_graph_module()
+
+        conv1d_nodes = gm.graph.find_nodes(
+            op="call_function",
+            target=torch.ops.aten.conv1d.default,
+        )
+        self.assertEqual(len(conv1d_nodes), 1, "Should find exactly one conv1d node")
+        return gm, conv1d_nodes[0]
+
+    def _build_conv2d_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
+        """Build a simple graph with a conv2d operation (no bias)."""
+        builder = GraphBuilder()
+        # Input shape: (batch, in_channels, height, width)
+        x = builder.placeholder("x", torch.randn(1, 3, 8, 8))
+        # Weight shape: (out_channels, in_channels, kernel_h, kernel_w)
+        weight = builder.placeholder("weight", torch.randn(6, 3, 3, 3))
+        conv2d = builder.call_operator(
+            op=torch.ops.aten.conv2d.default,
+            args=(x, weight),
+            meta=NodeMetadata(
+                {"source_fn_stack": [("conv2d", torch.ops.aten.conv2d.default)]}
+            ),
+        )
+        builder.output([conv2d])
+        gm = builder.get_graph_module()
+
+        conv2d_nodes = gm.graph.find_nodes(
+            op="call_function",
+            target=torch.ops.aten.conv2d.default,
+        )
+        self.assertEqual(len(conv2d_nodes), 1, "Should find exactly one conv2d node")
+        return gm, conv2d_nodes[0]
+
     @parameterized.expand(QUANTIZER_ANNOTATION_TEST_CASES)
     def test_quantizer_annotation(
         self,