Fix pylint error torch.tensor is not callable (#53424)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/53424
Fixes https://github.com/pytorch/pytorch/issues/24807 and supersedes the stale https://github.com/pytorch/pytorch/issues/25093 (Cc Microsheep). If you run the reproduction below
```python
import torch

if __name__ == "__main__":
    t = torch.tensor([1, 2, 3], dtype=torch.float64)
```
with `pylint==2.6.0`, you get the following output
```
test_pylint.py:1:0: C0114: Missing module docstring (missing-module-docstring)
test_pylint.py:4:8: E1101: Module 'torch' has no 'tensor' member; maybe 'Tensor'? (no-member)
test_pylint.py:4:38: E1101: Module 'torch' has no 'float64' member (no-member)
```
Now `pylint` doesn't recognize `torch.tensor` at all, although it is promoted in the stub. Given that it also doesn't recognize `torch.float64`, I think fixing this is out of scope for this PR.
---
## TL;DR
This is BC-breaking only for users who rely on unintended behavior. Since `torch/__init__.py` imported `torch/tensor.py`, that module was registered in `sys.modules` under the name `torch.tensor`. `torch/__init__.py` then overwrote the `torch.tensor` attribute with the actual function. As a result, `import torch.tensor as tensor` did not fail, but returned the function rather than the module. Users who rely on this import need to change it to `from torch import tensor`.
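A minimal before/after sketch of the affected import (the variable `t` is illustrative):
```python
# Before this PR, "torch.tensor" was still present in sys.modules, so the
# following import succeeded -- but it bound the torch.tensor *function*,
# not a module, because torch/__init__.py had rebound the attribute:
#
#     import torch.tensor as tensor  # now raises ModuleNotFoundError
#
# After this PR, import the function explicitly:
from torch import tensor

t = tensor([1, 2, 3])  # the regular torch.tensor factory call
```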
Reviewed By: zou3519
Differential Revision: D26223815
Pulled By: bdhirsh
fbshipit-source-id: 125b9ff3d276e84a645cd7521e8d6160b1ca1c21
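The `torch/serialization.py` hunk below adds a module-rename mapping that a custom `Unpickler` consults, so checkpoints pickled while `Tensor` still lived in `torch/tensor.py` keep loading. A standalone sketch of the idea, with a hypothetical class name rather than PyTorch's actual implementation:
```python
import pickle

# Mirrors load_module_mapping from the serialization.py hunk below:
# old module paths are redirected to their new locations.
load_module_mapping = {'torch.tensor': 'torch._tensor'}

class _RenamingUnpickler(pickle.Unpickler):  # hypothetical name
    def find_class(self, mod_name, name):
        # find_class must be overridden via subclassing because pickle
        # marks it readonly, ruling out monkey-patching (see the comment
        # in the serialization.py hunk below).
        mod_name = load_module_mapping.get(mod_name, mod_name)
        return super().find_class(mod_name, name)
```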
diff --git a/test/test_jit.py b/test/test_jit.py
index 6e9f4e8..12d3b6b 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -7374,7 +7374,7 @@
for op, tensor, const, swap_args in product(ops, tensors, consts, [True, False]):
# FIXME: things like 2 / long_tensor are not implemented correctly
- # Look in torch/tensor.py to see how pytorch implements it.
+ # Look in torch/_tensor.py to see how pytorch implements it.
if op == '/' and tensor.data_ptr() == long_tensor.data_ptr():
continue
diff --git a/torch/__init__.py b/torch/__init__.py
index cd9a69c..c85a266 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -476,7 +476,7 @@
# Define Storage and Tensor classes
################################################################################
-from .tensor import Tensor
+from ._tensor import Tensor
from .storage import _StorageBase
diff --git a/torch/tensor.py b/torch/_tensor.py
similarity index 100%
rename from torch/tensor.py
rename to torch/_tensor.py
diff --git a/torch/autograd/anomaly_mode.py b/torch/autograd/anomaly_mode.py
index 05f88db..f6ec361 100644
--- a/torch/autograd/anomaly_mode.py
+++ b/torch/autograd/anomaly_mode.py
@@ -38,7 +38,7 @@
>>> out.backward()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- File "/your/pytorch/install/torch/tensor.py", line 93, in backward
+ File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
@@ -57,7 +57,7 @@
out = MyFunc.apply(a)
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
- File "/your/pytorch/install/torch/tensor.py", line 93, in backward
+ File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index b9e4e95..2ac12f4 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -15,7 +15,7 @@
PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject *unused) {
using namespace torch::autograd::profiler;
- auto tensor_module = THPObjectPtr(PyImport_ImportModule("torch.tensor"));
+ auto tensor_module = THPObjectPtr(PyImport_ImportModule("torch._tensor"));
if (!tensor_module)
return nullptr;
diff --git a/torch/functional.py b/torch/functional.py
index 5d907a2..ed23d81 100644
--- a/torch/functional.py
+++ b/torch/functional.py
@@ -152,7 +152,7 @@
split, (tensor,), tensor, split_size_or_sections, dim=dim)
# Overwriting reason:
# This dispatches to two ATen functions depending on the type of
- # split_size_or_sections. The branching code is in tensor.py, which we
+ # split_size_or_sections. The branching code is in _tensor.py, which we
# call here.
return tensor.split(split_size_or_sections, dim)
diff --git a/torch/serialization.py b/torch/serialization.py
index 6729adb..bb9fe05 100644
--- a/torch/serialization.py
+++ b/torch/serialization.py
@@ -843,7 +843,10 @@
storage = loaded_storages[key]
return storage
- load_module_mapping: Dict[str, str] = {}
+ load_module_mapping: Dict[str, str] = {
+ # See https://github.com/pytorch/pytorch/pull/51633
+ 'torch.tensor': 'torch._tensor'
+ }
# Need to subclass Unpickler instead of directly monkey-patching the find_class method
# because it's marked readonly in pickle.