Commit 0f1d47d

dnikolaev-amd authored and AMD AMD committed
[rocm6.4_internal_testing] remove xfail from 'batch_norm_with_update' (#1821)
Remove `xfail` from the `batch_norm_with_update` op in `test_grad` and `test_vmap_autograd_grad`; these tests pass now. Fixes https://ontrack-internal.amd.com/browse/SWDEV-472564. Cherry-picked from rocm6.3_internal_testing PR #1776.
1 parent 89f9b8d commit 0f1d47d

File tree

1 file changed (+0, -14 lines)


test/functorch/test_ops.py (-14 lines)
@@ -438,13 +438,6 @@ class TestOperators(TestCase):
             ),  # Works on ROCm
             xfail("torch.ops.aten._flash_attention_forward"),
             xfail("torch.ops.aten._efficient_attention_forward"),
-            # RuntimeError: Expected contiguous tensor, but got
-            # non-contiguous tensor for argument #2 'grad_output'
-            decorate(
-                "_batch_norm_with_update",
-                decorator=expectedFailureIf(TEST_WITH_ROCM),
-                device_type="cuda",
-            ),
         }
     ),
 )
@@ -2394,13 +2387,6 @@ def fn(input, weight, bias):
             skip("sparse.sampled_addmm", ""),
             skip("sparse.mm", "reduce"),
             skip("native_layer_norm", "", device_type="cpu"),
-            # RuntimeError: Expected contiguous tensor, but got
-            # non-contiguous tensor for argument #2 'grad_output'
-            decorate(
-                "_batch_norm_with_update",
-                decorator=expectedFailureIf(TEST_WITH_ROCM),
-                device_type="cuda",
-            ),
         },
     )
     @opsToleranceOverride(
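
For context, the removed entries use the conditional-xfail pattern found throughout test/functorch/test_ops.py: `decorate("_batch_norm_with_update", decorator=expectedFailureIf(TEST_WITH_ROCM), device_type="cuda")` marks the op as expected to fail, but only on ROCm builds and only for CUDA devices. The snippet below is a minimal, self-contained sketch of that pattern; `TEST_WITH_ROCM` and `expectedFailureIf` are re-implemented as stand-ins rather than imported from PyTorch's test utilities, and the test class and method names are made up for illustration.

```python
# Minimal sketch of a conditional expected-failure, using plain unittest.
# `TEST_WITH_ROCM` and `expectedFailureIf` are stand-ins that mimic the
# PyTorch test helpers of the same names; they are not imported from PyTorch.
import unittest

TEST_WITH_ROCM = False  # PyTorch derives this from the build/environment


def expectedFailureIf(condition):
    """Apply unittest.expectedFailure only when `condition` is true."""
    def wrapper(fn):
        return unittest.expectedFailure(fn) if condition else fn
    return wrapper


class BatchNormWithUpdateGrad(unittest.TestCase):  # hypothetical test class
    @expectedFailureIf(TEST_WITH_ROCM)
    def test_grad(self):
        # Placeholder for the real gradient check; on ROCm builds the
        # decorator marks this test as an expected failure.
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
```

Once the underlying ROCm issue is fixed, such a decorator has to be removed: unittest would otherwise report the now-passing test as an unexpected success. That is what this commit does for `_batch_norm_with_update` in `test_grad` and `test_vmap_autograd_grad`. On a ROCm build the change can be sanity-checked with something like `python test/functorch/test_ops.py -k batch_norm_with_update` (shown for illustration; the exact test selection may differ).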
