Skip to content

Commit 99b0758

Browse files
[rocm6.4_internal_testing] remove xfail from 'batch_norm_with_update' (#1821)
Remove `xfail` from the `batch_norm_with_update` op in `test_grad` and `test_vmap_autograd_grad`; these tests now pass. Fixes https://ontrack-internal.amd.com/browse/SWDEV-472564. Cherry-picked from rocm6.3_internal_testing PR #1776.
1 parent 0ce9f6e commit 99b0758

File tree

1 file changed

+0
-14
lines changed

1 file changed

+0
-14
lines changed

test/functorch/test_ops.py

-14
Original file line numberDiff line numberDiff line change
@@ -439,13 +439,6 @@ class TestOperators(TestCase):
439439
), # Works on ROCm
440440
xfail("torch.ops.aten._flash_attention_forward"),
441441
xfail("torch.ops.aten._efficient_attention_forward"),
442-
# RuntimeError: Expected contiguous tensor, but got
443-
# non-contiguous tensor for argument #2 'grad_output'
444-
decorate(
445-
"_batch_norm_with_update",
446-
decorator=expectedFailureIf(TEST_WITH_ROCM),
447-
device_type="cuda",
448-
),
449442
}
450443
),
451444
)
@@ -2375,13 +2368,6 @@ def fn(input, weight, bias):
23752368
skip("sparse.sampled_addmm", ""),
23762369
skip("sparse.mm", "reduce"),
23772370
skip("native_layer_norm", "", device_type="cpu"),
2378-
# RuntimeError: Expected contiguous tensor, but got
2379-
# non-contiguous tensor for argument #2 'grad_output'
2380-
decorate(
2381-
"_batch_norm_with_update",
2382-
decorator=expectedFailureIf(TEST_WITH_ROCM),
2383-
device_type="cuda",
2384-
),
23852371
},
23862372
)
23872373
@opsToleranceOverride(

0 commit comments

Comments
 (0)