Skip to content

Commit ba6e106

Browse files
committed
[rocm6.4_internal_testing] remove xfail from 'batch_norm_with_update' (#1821)
Remove `xfail` from the `batch_norm_with_update` op in `test_grad` and `test_vmap_autograd_grad`; these tests pass now. Fixes https://ontrack-internal.amd.com/browse/SWDEV-472564. Cherry-picked from rocm6.3_internal_testing PR #1776 (cherry picked from commit 99b0758)
1 parent 5f52c77 commit ba6e106

File tree

1 file changed

+0
-14
lines changed

1 file changed

+0
-14
lines changed

test/functorch/test_ops.py

-14
Original file line numberDiff line numberDiff line change
@@ -436,13 +436,6 @@ class TestOperators(TestCase):
436436
), # Works on ROCm
437437
xfail("torch.ops.aten._flash_attention_forward"),
438438
xfail("torch.ops.aten._efficient_attention_forward"),
439-
# RuntimeError: Expected contiguous tensor, but got
440-
# non-contiguous tensor for argument #2 'grad_output'
441-
decorate(
442-
"_batch_norm_with_update",
443-
decorator=expectedFailureIf(TEST_WITH_ROCM),
444-
device_type="cuda",
445-
),
446439
}
447440
),
448441
)
@@ -2368,13 +2361,6 @@ def fn(input, weight, bias):
23682361
skip("sparse.sampled_addmm", ""),
23692362
skip("sparse.mm", "reduce"),
23702363
skip("native_layer_norm", "", device_type="cpu"),
2371-
# RuntimeError: Expected contiguous tensor, but got
2372-
# non-contiguous tensor for argument #2 'grad_output'
2373-
decorate(
2374-
"_batch_norm_with_update",
2375-
decorator=expectedFailureIf(TEST_WITH_ROCM),
2376-
device_type="cuda",
2377-
),
23782364
},
23792365
)
23802366
@opsToleranceOverride(

0 commit comments

Comments
 (0)