# ${generated_comment}

from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload
from torch._six import inf

import builtins

# These identifiers are reexported from other modules. These modules
# are not mypy-clean yet, so in order to use this stub file usefully
# from mypy you will need to specify --follow-imports=silent.
# Not all is lost: these imports still enable IDEs like PyCharm to offer
# autocomplete.
#
# Note: Why does the syntax here look so strange? Import visibility
# rules in stubs are different from normal Python files! You must use
# 'from ... import ... as ...' syntax to cause an identifier to be
# exposed (or use a wildcard); a plain 'from ... import ...' does not
# re-export the name.
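#
# For example (illustrative module/name, not part of this stub):
#   from .foo import bar            # 'bar' is NOT re-exported
#   from .foo import bar as bar     # 'bar' IS re-exported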
from .random import set_rng_state as set_rng_state, get_rng_state as get_rng_state, \
    manual_seed as manual_seed, initial_seed as initial_seed, seed as seed
from ._tensor_str import set_printoptions as set_printoptions
from .functional import *
from .serialization import save as save, load as load
from .autograd import no_grad as no_grad, enable_grad as enable_grad, \
    set_grad_enabled as set_grad_enabled
from . import cuda as cuda
from . import optim as optim

class dtype: ...

class layout: ...

strided: layout = ...

class memory_format: ...

contiguous_format: memory_format = ...

class qscheme: ...

per_tensor_affine: qscheme = ...

# See https://github.com/python/mypy/issues/4146 for why these workarounds
# are necessary
_int = builtins.int
_float = builtins.float
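# For example (illustrative): methods such as 'def int(self) -> Tensor: ...'
# on the Tensor class below shadow the builtin 'int'/'float' names inside
# the class body, so annotations in that scope use '_int'/'_float' instead.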

class device:
    type: str
    index: _int

    @overload
    def __init__(self, device: Union[_int, str]) -> None: ...

    @overload
    def __init__(self, type: str, index: _int) -> None: ...
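    # Usage sketch (illustrative):
    #   torch.device('cuda:0')   # single string form
    #   torch.device('cuda', 0)  # type + index form
    #   torch.device(0)          # bare device ordinal (interpreted as CUDA)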

class Size(tuple): ...

class Storage: ...

# See https://github.com/python/mypy/issues/4146 for why these workarounds
# are necessary
_dtype = dtype
_device = device
_qscheme = qscheme
_size = Union[Size, List[_int], Tuple[_int, ...]]

# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float]

class Generator:
    device: _device = ...

    @overload
    def __init__(self, device: Optional[_device]=None) -> None: ...

    @overload
    def __init__(self, device: Union[_int, str]) -> None: ...
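    # Usage sketch (illustrative):
    #   g = torch.Generator()               # default generator on the CPU
    #   g = torch.Generator(device='cuda')  # generator on the current CUDA device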

# TODO: One downside of doing it this way is that direct use of
# torch.tensor.Tensor doesn't get type annotations. Nobody
# should really do that, so maybe this is not so bad.
class Tensor:
    dtype: _dtype = ...
    shape: Size = ...
    device: _device = ...
    requires_grad: bool = ...
    grad: Optional[Tensor] = ...

    ${tensor_method_hints}

    # Manually defined methods from torch/tensor.py
    def backward(self, gradient: Optional[Tensor]=None, retain_graph: Optional[bool]=None, create_graph: bool=False) -> None: ...
    def register_hook(self, hook: Callable) -> Any: ...
    def retain_grad(self) -> None: ...
    def is_pinned(self) -> bool: ...
    def is_shared(self) -> bool: ...
    def share_memory_(self) -> None: ...
    # TODO: fill in the types for these, or otherwise figure out some
    # way to not have to write these out again...
    def nonzero(self, *, as_tuple=False): ...
    def norm(self, p="fro", dim=None, keepdim=False): ...
    def stft(self, n_fft, hop_length=None, win_length=None, window=None,
             center=True, pad_mode='reflect', normalized=False, onesided=True): ...
    def split(self, split_size, dim=0): ...
    def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None): ...
    def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None): ...
    def lu(self, pivot=True, get_infos=False): ...

${function_hints}

${legacy_class_hints}

${dtype_class_hints}

# Pure Python functions defined in torch/__init__.py

def typename(obj) -> str: ...
def is_tensor(obj) -> bool: ...
def is_storage(obj) -> bool: ...
def set_default_tensor_type(type) -> None: ...  # ick, what a bad legacy API
def set_default_dtype(d: _dtype) -> None: ...
def manager_path() -> str: ...
def compiled_with_cxx11_abi() -> bool: ...

# The return value of this function depends on the value of `as_tuple`
# (similar to `unique`, `lu`, etc.); as such, it is not possible to
# type it precisely
def nonzero(input: Tensor, *, out: Optional[Tensor]=None, as_tuple: Optional[bool]=None): ...
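
# For example (illustrative):
#   torch.nonzero(t)                 # -> Tensor of indices with shape (n, t.dim())
#   torch.nonzero(t, as_tuple=True)  # -> tuple of 1-D index Tensors, one per dimension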