From cf54bf75a80c1e365df46ae7d0fc51b69b332994 Mon Sep 17 00:00:00 2001 From: chrjoh01 Date: Mon, 16 Mar 2026 09:46:16 +0000 Subject: [PATCH] Arm backend: Up tolerance for conv2d, upsample_bilinear2d, etc. Increased the tolerance for: - conv2 on TOSA FP/VGF - ic3 on TOSA FP16 - T5 on TOSA INT/VGF Signed-off-by: Christoffer J.L. Change-Id: I42e912a4b609f93adeb5a55a9a71e8121368fae8 --- .../arm/test/models/test_T5ForConditionalGeneration_arm.py | 5 +++-- backends/arm/test/models/test_inception_v3_arm.py | 2 +- backends/arm/test/ops/test_conv2d.py | 4 ++++ backends/arm/test/ops/test_upsample_bilinear2d.py | 2 ++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/backends/arm/test/models/test_T5ForConditionalGeneration_arm.py b/backends/arm/test/models/test_T5ForConditionalGeneration_arm.py index 7482928158b..7df97c6a199 100644 --- a/backends/arm/test/models/test_T5ForConditionalGeneration_arm.py +++ b/backends/arm/test/models/test_T5ForConditionalGeneration_arm.py @@ -117,7 +117,8 @@ def test_t5_for_conditional_generation_tosa_INT(): aten_op=[], exir_op=[], use_to_edge_transform_and_lower=True, - atol=5, # TODO: MLETORCH-1703: Reduce the tolerance of quantized T5ForConditionalGeneration + atol=14, # TODO: MLETORCH-1703: Reduce the tolerance of quantized T5ForConditionalGeneration + frobenius_threshold=0.3, ) pipeline.change_args( "check_count.exir", @@ -164,7 +165,7 @@ def test_t5_for_conditional_generation_vgf_quant(): aten_op=[], exir_op=[], use_to_edge_transform_and_lower=True, - atol=5, # TODO: MLETORCH-1703: Reduce the tolerance of quantized T5ForConditionalGeneration + atol=14, # TODO: MLETORCH-1703: Reduce the tolerance of quantized T5ForConditionalGeneration quantize=True, ) pipeline.change_args( diff --git a/backends/arm/test/models/test_inception_v3_arm.py b/backends/arm/test/models/test_inception_v3_arm.py index f842ea1f265..e71bb960bb3 100644 --- a/backends/arm/test/models/test_inception_v3_arm.py +++ b/backends/arm/test/models/test_inception_v3_arm.py @@ -56,7 +56,7 @@ def test_ic3_tosa_FP_fp16(): aten_op=[], exir_op=[], use_to_edge_transform_and_lower=True, - atol=5e-2, + atol=0.2, ) pipeline.run() diff --git a/backends/arm/test/ops/test_conv2d.py b/backends/arm/test/ops/test_conv2d.py index 1a9ed1bde18..cf98e0f84c9 100644 --- a/backends/arm/test/ops/test_conv2d.py +++ b/backends/arm/test/ops/test_conv2d.py @@ -483,6 +483,8 @@ def test_convolution_2d_tosa_FP(test_data): aten_op, exir_op, tosa_extensions=["bf16"], + atol=3e-3, + rtol=3e-3, ) pipeline.run() @@ -593,6 +595,8 @@ def test_convolution_2d_vgf_no_quant(test_data): aten_op, exir_op, quantize=False, + atol=3e-3, + rtol=3e-3, ) pipeline.run() diff --git a/backends/arm/test/ops/test_upsample_bilinear2d.py b/backends/arm/test/ops/test_upsample_bilinear2d.py index 6d7b1711ca2..95cf57af98a 100644 --- a/backends/arm/test/ops/test_upsample_bilinear2d.py +++ b/backends/arm/test/ops/test_upsample_bilinear2d.py @@ -426,6 +426,8 @@ def test_upsample_bilinear2d_vec_vgf_no_quant_UpsamplingBilinear2d( aten_op, exir_op, quantize=False, + atol=2e-3, + rtol=2e-3, ) if not compare: pipeline.pop_stage(-1)