In test_data_parallel.py, remove skipIfRocm from tests that now pass on ROCm (#34978)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/34978

The skipIfRocm decorator is removed from tests in test/distributed/test_data_parallel.py that now pass on ROCm, and the skipIfRocm import, no longer used in the file, is dropped.
Differential Revision: D20535920
Pulled By: mrshenli
fbshipit-source-id: 3baa8608dd3b0dd5578bc32e56a2e6c1fe69492d
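For context, skipIfRocm is a decorator from torch.testing._internal.common_utils that skips a test when PyTorch is built for ROCm. The sketch below shows how such a decorator can be written; the skip_if_rocm name and the torch.version.hip check are illustrative assumptions, not the library's actual implementation.

```python
import unittest
from functools import wraps

import torch

# Assumption: ROCm (HIP) builds expose a version string via torch.version.hip,
# which is None on CUDA/CPU builds; the real skipIfRocm may detect ROCm differently.
RUNNING_ON_ROCM = torch.version.hip is not None


def skip_if_rocm(fn):
    """Skip the decorated test when running on a ROCm build."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if RUNNING_ON_ROCM:
            raise unittest.SkipTest("test skipped on ROCm")
        return fn(*args, **kwargs)
    return wrapper
```

Removing such a decorator simply lets the test run, and be reported, on ROCm CI instead of being skipped, which is the intent of this change for tests that already pass there.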
diff --git a/test/distributed/test_data_parallel.py b/test/distributed/test_data_parallel.py
index d38da28..598bd94 100644
--- a/test/distributed/test_data_parallel.py
+++ b/test/distributed/test_data_parallel.py
@@ -7,7 +7,7 @@
 from torch import nn
 import torch.nn.parallel as dp
 from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
-from torch.testing._internal.common_utils import run_tests, TestCase, skipIfRocm, repeat_test_for_types, ALL_TENSORTYPES, PY3
+from torch.testing._internal.common_utils import run_tests, TestCase, repeat_test_for_types, ALL_TENSORTYPES, PY3
 from torch.testing._internal.common_utils import _assertGradAndGradgradChecks
 from torch.testing._internal.common_utils import dtype2prec_DONTUSE
 import torch.nn.functional as F
@@ -257,7 +257,6 @@
         test(s.cuda(1), None, inp, [1, 0], should_fail=False)

     @unittest.skipIf(not TEST_MULTIGPU or not PY3, "multi-GPU not supported")
-    @skipIfRocm
     def test_data_parallel_model_no_refcycles(self):
         # Python 2.7 will create reference cycles with the following
         # Module on multiple GPUs, but Python 3 shouldn't unless
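The test guarded above verifies that running a module through DataParallel on multiple GPUs does not create reference cycles under Python 3. As a standalone illustration only (not the body of the actual test), the gc module can measure this directly; the collected_after helper and the nn.Linear model below are hypothetical.

```python
import gc

import torch
import torch.nn as nn


def collected_after(fn):
    """Run fn() and return how many unreachable objects the garbage
    collector found afterwards; 0 means fn() created no reference cycles."""
    gc.collect()   # start from a clean slate
    gc.disable()   # keep cycles alive until the explicit collect below
    try:
        fn()
        return gc.collect()
    finally:
        gc.enable()


if torch.cuda.device_count() >= 2:
    model = nn.DataParallel(nn.Linear(10, 10).cuda())

    def forward_pass():
        out = model(torch.randn(20, 10, device="cuda"))
        out.sum().item()  # force evaluation of the gathered output

    # A refcycle-free DataParallel forward pass should report 0 here.
    print("unreachable objects:", collected_after(forward_pass))
```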