Skip to content

Commit 0db3ba5

Browse files
RahulC7 authored and facebook-github-bot committed
Adding test for CadenceWakeWordQuantizer
Summary: As title. Differential Revision: D88898933
1 parent efece4f commit 0db3ba5

File tree

1 file changed

+31
-1
lines changed

1 file changed

+31
-1
lines changed

backends/cadence/aot/tests/test_quantizer_ops.py

Lines changed: 31 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,6 @@
5555
CadenceNopQuantizer, # No-op quantizer, doesn't annotate anything
5656
CadenceW8A32MixedQuantizer, # TODO: T247438158 Add test coverage
5757
CadenceRmsNormNopQuantizer, # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition
58-
CadenceWakeWordQuantizer, # TODO: T247438162 Add test coverage
5958
}
6059

6160

@@ -126,6 +125,15 @@
126125
# For layer_norm: only input_activation (weights/bias are passed as others)
127126
[qconfig_A8W8.input_activation],
128127
),
128+
(
129+
"add_A8W8",
130+
lambda self: self._build_add_graph(),
131+
CadenceWakeWordQuantizer(),
132+
torch.ops.aten.add.Tensor,
133+
qconfig_A8W8.output_activation,
134+
# For add: both inputs are activations
135+
[qconfig_A8W8.input_activation, qconfig_A8W8.input_activation],
136+
),
129137
]
130138

131139
# Derive the set of tested quantizer classes from the test cases.
@@ -277,6 +285,28 @@ def _build_layer_norm_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
277285
)
278286
return gm, layer_norm_nodes[0]
279287

288+
def _build_add_graph(self) -> tuple[torch.fx.GraphModule, torch.fx.Node]:
289+
"""Build a simple graph with an add operation."""
290+
builder = GraphBuilder()
291+
x = builder.placeholder("x", torch.randn(1, 10))
292+
y = builder.placeholder("y", torch.randn(1, 10))
293+
add = builder.call_operator(
294+
op=torch.ops.aten.add.Tensor,
295+
args=(x, y),
296+
meta=NodeMetadata(
297+
{"source_fn_stack": [("add", torch.ops.aten.add.Tensor)]}
298+
),
299+
)
300+
builder.output([add])
301+
gm = builder.get_graph_module()
302+
303+
add_nodes = gm.graph.find_nodes(
304+
op="call_function",
305+
target=torch.ops.aten.add.Tensor,
306+
)
307+
self.assertEqual(len(add_nodes), 1, "Should find exactly one add node")
308+
return gm, add_nodes[0]
309+
280310
@parameterized.expand(QUANTIZER_ANNOTATION_TEST_CASES)
281311
def test_quantizer_annotation(
282312
self,

0 commit comments

Comments
 (0)