Updates torch.clone documentation to be consistent with other functions (#43098)
Summary:
`torch.clone` exists but was undocumented, and the `Tensor.clone` docstring incorrectly listed `memory_format` as a positional argument. This PR:
- documents `torch.clone`
- lists `memory_format` as a keyword-only argument (see the snippet below)
- wordsmiths the documentation
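
As a quick illustration (not part of the patch itself), the keyword-only signature means `memory_format` must be passed by name:

```python
import torch

x = torch.randn(2, 3)

# memory_format is keyword-only; passing it by name works:
y = torch.clone(x, memory_format=torch.preserve_format)

# torch.clone(x, torch.preserve_format) would raise a TypeError,
# since memory_format cannot be passed positionally.
```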
Pull Request resolved: https://github.com/pytorch/pytorch/pull/43098
Reviewed By: ngimel
Differential Revision: D23153397
Pulled By: mruberry
fbshipit-source-id: c2ea781cdcb8b5ad3f04987c2b3a2f1fe0eaf18b
diff --git a/docs/source/torch.rst b/docs/source/torch.rst
index 5a490e0..b731758 100644
--- a/docs/source/torch.rst
+++ b/docs/source/torch.rst
@@ -410,6 +410,7 @@
bucketize
cartesian_prod
cdist
+ clone
combinations
cross
cummax
diff --git a/torch/_tensor_docs.py b/torch/_tensor_docs.py
index 25ed513..9234499 100644
--- a/torch/_tensor_docs.py
+++ b/torch/_tensor_docs.py
@@ -799,21 +799,10 @@
Alias for :meth:`~Tensor.clamp_`.
""")
-add_docstr_all('clone',
- r"""
-clone(memory_format=torch.preserve_format) -> Tensor
+add_docstr_all('clone', r"""
+clone(*, memory_format=torch.preserve_format) -> Tensor
-Returns a copy of the :attr:`self` tensor. The copy has the same size and data
-type as :attr:`self`.
-
-.. note::
-
- This function is differentiable and so gradients will flow back to the original
- Tensor. If you want to get a Tensor that is independent from the point of view
- of the autograd, see :meth:`~Tensor.detach`.
-
-Args:
- {memory_format}
+See :func:`torch.clone`
""".format(**common_args))
add_docstr_all('contiguous',
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index 9131b07..bd51575 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -31,6 +31,8 @@
input (Tensor): the input tensor.
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
+    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
+        the returned tensor. Default: ``torch.preserve_format``.
""")
reduceops_common_args = merge_dicts(common_args, parse_kwargs("""
@@ -1519,6 +1521,24 @@
[-0.0889, 0.2122, 0.1412]])
""")
+add_docstr(torch.clone, r"""
+clone(input, *, memory_format=torch.preserve_format) -> Tensor
+
+Returns a copy of :attr:`input`.
+
+.. note::
+
+ This function is differentiable, so gradients will flow back from the
+ result of this operation to :attr:`input`. To create a tensor without an
+    autograd relationship to :attr:`input`, see :meth:`~Tensor.detach`.
+
+Args:
+ {input}
+
+Keyword args:
+ {memory_format}
+""".format(**common_args))
+
add_docstr(torch.clamp, r"""
clamp(input, min, max, out=None) -> Tensor
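
To illustrate the note in the new `torch.clone` docstring (example only, not part of this change): `clone` is differentiable, while `detach` breaks the autograd relationship.

```python
import torch

x = torch.tensor([1.0, 2.0], requires_grad=True)

# clone() is differentiable: gradients flow back to x.
y = x.clone()
y.sum().backward()
print(x.grad)  # tensor([1., 1.])

# detach() returns a tensor with no autograd relationship to x.
z = x.detach()
print(z.requires_grad)  # False
```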