Revert "Remove fixed skips (#108674)"
This reverts commit 518cfda2dd0e940603c74717b4cb33493a9ec908.
Reverted https://github.com/pytorch/pytorch/pull/108674 on behalf of https://github.com/huydhn: sorry for reverting this, but one test is failing on inductor (https://hud.pytorch.org/pytorch/pytorch/commit/518cfda2dd0e940603c74717b4cb33493a9ec908), and it seems easier to revert this than to disable the test ([comment](https://github.com/pytorch/pytorch/pull/108674#issuecomment-1709310192))
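For context, the skips being restored simply gate each test on the active backend. Below is a minimal sketch of how a decorator like `skipIfTorchInductor` typically behaves, not the actual `torch.testing._internal.common_utils` implementation; it assumes the inductor test shard is signalled via the `PYTORCH_TEST_WITH_INDUCTOR` environment variable, as PyTorch CI does:

```python
# Minimal sketch of a skip decorator like skipIfTorchInductor; NOT the
# actual torch.testing implementation. Assumes the inductor shard sets
# PYTORCH_TEST_WITH_INDUCTOR=1, as the CI workflow does.
import functools
import os
import unittest

RUNNING_WITH_INDUCTOR = os.getenv("PYTORCH_TEST_WITH_INDUCTOR") == "1"

def skip_if_torch_inductor(msg="test does not work with TorchInductor"):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if RUNNING_WITH_INDUCTOR:
                # Raising SkipTest marks the test as skipped, not failed,
                # so the inductor job stays green while the gap is tracked.
                raise unittest.SkipTest(msg)
            return fn(*args, **kwargs)
        return wrapper
    return decorator
```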
diff --git a/test/test_ops.py b/test/test_ops.py
index 1680615..8830a52 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -648,6 +648,7 @@
# Cases tested here:
# - out= with the correct dtype and device, but the wrong shape
@ops(_ops_and_refs, dtypes=OpDTypes.none)
+ @skipIfTorchInductor("Inductor does not support complex dtype yet")
def test_out_warning(self, device, op):
# Prefers running in float32 but has a fallback for the first listed supported dtype
supported_dtypes = op.supported_dtypes(self.device_type)
@@ -776,6 +777,7 @@
# - if device, dtype are NOT passed, any combination of dtype/device should be OK for out
# - if device, dtype are passed, device and dtype should match
@ops(_ops_and_refs, dtypes=OpDTypes.any_one)
+ @skipIfTorchInductor("Inductor does not support complex dtype yet")
def test_out(self, device, dtype, op):
# Prefers running in float32 but has a fallback for the first listed supported dtype
samples = op.sample_inputs(device, dtype)
@@ -995,6 +997,7 @@
# same values for the cross-product of op variants (method, inplace)
# against eager's gold standard op function variant
@_variant_ops(op_db)
+ @skipIfTorchInductor("Inductor does not support complex dtype yet")
def test_variant_consistency_eager(self, device, dtype, op):
# Acquires variants (method variant, inplace variant, operator variant, inplace_operator variant, aliases)
@@ -1176,6 +1179,7 @@
# Reference testing for operations in complex32 against complex64.
# NOTE: We test against complex64 as NumPy doesn't have a complex32 equivalent dtype.
@ops(op_db, allowed_dtypes=(torch.complex32,))
+ @skipIfTorchInductor("Inductor does not support complex dtype yet")
def test_complex_half_reference_testing(self, device, dtype, op):
if not op.supports_dtype(torch.complex32, device):
unittest.skip("Does not support complex32")
@@ -1206,6 +1210,7 @@
@ops(op_db, allowed_dtypes=(torch.bool,))
@unittest.skipIf(TEST_WITH_UBSAN, "Test uses undefined behavior")
+ @skipIfTorchInductor("Inductor does not support view with dtype yet")
def test_non_standard_bool_values(self, device, dtype, op):
# Test boolean values other than 0x00 and 0x01 (gh-54789)
def convert_boolean_tensors(x):
diff --git a/test/test_torch.py b/test/test_torch.py
index c8d18c7..ffcf3bf 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -8383,6 +8383,7 @@
self.assertIs(torch.int32, b.to(dtype=torch.int32).dtype)
self.assertEqual(b.device, b.to(dtype=torch.int32).device)
+ @skipIfTorchInductor("FIXME")
def test_to(self):
self._test_to_with_layout(torch.strided)
is_cuda10_2_or_higher = (
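Once the revert lands, the restored skips can be sanity-checked locally. A hypothetical check (the exact runner flags may differ from the CI harness, and `-k` filtering is assumed to be supported when invoking the test file directly):

```python
# Hypothetical local verification; exact invocation may differ from CI.
import os
import subprocess

env = dict(os.environ, PYTORCH_TEST_WITH_INDUCTOR="1")
# With the decorators restored, verbose output should list the marked tests
# as skipped ("Inductor does not support complex dtype yet") rather than failed.
subprocess.run(["python", "test/test_ops.py", "-v", "-k", "test_out"], env=env)
```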