[BE] Enable ruff's UP rules in pyproject.toml (#105437)
Signed-off-by: Justin Chu <justinchu@microsoft.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105437
Approved by: https://github.com/huydhn, https://github.com/malfet, https://github.com/Skylion007
diff --git a/pyproject.toml b/pyproject.toml
index 69afc67..5cfea34 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,6 +55,8 @@
"SIM116", # Disable Use a dictionary instead of consecutive `if` statements
"SIM117",
"SIM118",
+ "UP006", # keep-runtime-typing
+ "UP007", # keep-runtime-typing
]
line-length = 120
select = [
@@ -66,6 +68,7 @@
"SIM1",
"W",
# Not included in flake8
+ "UP",
"PERF",
"PLE",
"TRY302",
@@ -73,6 +76,13 @@
[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]
+"test/jit/**" = [
+    "UP", # We don't want to modify the jit tests as they test specific syntax
+]
+"torch/onnx/**" = [
+ "UP037", # ONNX does runtime type checking
+]
+
"torchgen/api/types/__init__.py" = [
"F401",
"F403",
@@ -81,3 +91,6 @@
"F401",
"F403",
]
+"torch/utils/collect_env.py" = [
+ "UP", # collect_env.py needs to work with older versions of Python
+]
diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index 9bf111f..5908b9b 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -518,7 +518,7 @@
numerical_testsuit = [4.4, 4.5, 4.6, 5.5]
for numerical_number in numerical_testsuit:
- x = torch.ones((17)) * numerical_number
+ x = torch.ones(17) * numerical_number
with config.patch({"cpp.simdlen": None}):
torch._dynamo.reset()
metrics.reset()
diff --git a/test/mobile/test_lite_script_type.py b/test/mobile/test_lite_script_type.py
index 913c527..aee61fe 100644
--- a/test/mobile/test_lite_script_type.py
+++ b/test/mobile/test_lite_script_type.py
@@ -14,7 +14,7 @@
class TestLiteScriptModule(TestCase):
def test_typing_namedtuple(self):
- myNamedTuple = NamedTuple('myNamedTuple', [('a', List[torch.Tensor])])
+ myNamedTuple = NamedTuple('myNamedTuple', [('a', List[torch.Tensor])]) # noqa: UP014
class MyTestModule(torch.nn.Module):
def forward(self, a: torch.Tensor):
diff --git a/test/nn/test_pooling.py b/test/nn/test_pooling.py
index 55785ca..57b423f 100644
--- a/test/nn/test_pooling.py
+++ b/test/nn/test_pooling.py
@@ -1209,9 +1209,10 @@
return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
if adaptive:
- cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
+ cls_name = 'AdaptiveMaxPool{}d'.format(num_dim) # noqa: UP032
else:
- cls_name = 'MaxPool{}d'.format(num_dim)
+ # FIXME(#105716): Test fails when using f-string
+ cls_name = 'MaxPool{}d'.format(num_dim) # noqa: UP032
module_cls = getattr(nn, cls_name)
module = module_cls(2, return_indices=True).to(device, dtype=dtype)
numel = 4 ** (num_dim + 1)
diff --git a/test/test_jit.py b/test/test_jit.py
index 984dc6b..f5eeebd 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -14257,7 +14257,7 @@
self.assertEqual(out, torch.tensor(6.0))
def test_namedtuple_type_inference(self):
- _AnnotatedNamedTuple = NamedTuple('_NamedTupleAnnotated', [('value', int)])
+ _AnnotatedNamedTuple = NamedTuple('_NamedTupleAnnotated', [('value', int)]) # noqa: UP014
_UnannotatedNamedTuple = namedtuple('_NamedTupleUnAnnotated', ['value'])
def test_check_named_tuple_value():