Fix docstrings on torch/nn/modules (#113260)
Fixes #112598
## Description
Fixes the docstrings in the following files.
```bash
pydocstyle path-to-file --count
```
| File | pydocstyle count (before -> after) |
| ------------------------------------- | ------- |
| torch/nn/modules/adaptive.py | 20 -> 4 |
| torch/nn/modules/channelshuffle.py | 7 -> 4 |
| torch/nn/modules/conv.py | 37 -> 25 |
| torch/nn/modules/distance.py | 7 -> 5 |
| torch/nn/modules/dropout.py | 17 -> 7 |
| torch/nn/modules/flatten.py | 10 -> 7 |
| torch/nn/modules/fold.py | 11 -> 7 |
| torch/nn/modules/instancenorm.py | 13 -> 1 |
| torch/nn/modules/lazy.py | 11 -> 2 |
| torch/nn/modules/linear.py | 20 -> 14 |
| torch/nn/modules/normalization.py | 25 -> 16 |
| torch/nn/modules/padding.py | 33 -> 19 |
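Most hunks below apply the same small set of fixes that pydocstyle reports: a one-line summary ending in a period, a blank line between the summary and the extended description, and a blank line after a class docstring. A minimal before/after sketch of the pattern (illustrative only; the class names and text are not taken verbatim from the patch):

```python
# Before: pydocstyle flags the wrapped summary line and the missing blank
# line between the summary and the description (e.g. D205).
class Before:
    """Efficient softmax approximation as described in
    some paper, with further details following directly."""
    groups: int


# After: one-line summary ending in a period, a blank line, then the body,
# plus a blank line after the class docstring.
class After:
    """Efficient softmax approximation.

    As described in some paper, with further details in a separate paragraph.
    """

    groups: int
```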
Pull Request resolved: https://github.com/pytorch/pytorch/pull/113260
Approved by: https://github.com/mikaylagawarecki
diff --git a/torch/nn/modules/adaptive.py b/torch/nn/modules/adaptive.py
index fd69b1c..3d61e9d 100644
--- a/torch/nn/modules/adaptive.py
+++ b/torch/nn/modules/adaptive.py
@@ -16,7 +16,9 @@
class AdaptiveLogSoftmaxWithLoss(Module):
- r"""Efficient softmax approximation as described in
+ r"""Efficient softmax approximation.
+
+ As described in
`Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
Moustapha Cissé, David Grangier, and Hervé Jégou
<https://arxiv.org/abs/1609.04309>`__.
@@ -242,9 +244,7 @@
return _ASMoutput(output, loss)
def _get_full_log_prob(self, input, head_output):
- """ Given input tensor, and output of `self.head`,
- compute the log of the full distribution """
-
+ """Given input tensor, and output of ``self.head``, compute the log of the full distribution."""
out = input.new_empty((head_output.size(0), self.n_classes))
head_logprob = log_softmax(head_output, dim=1)
@@ -260,7 +260,7 @@
return out
def log_prob(self, input: Tensor) -> Tensor:
- r""" Computes log probabilities for all :math:`\texttt{n\_classes}`
+ r"""Compute log probabilities for all :math:`\texttt{n\_classes}`.
Args:
input (Tensor): a minibatch of examples
@@ -275,13 +275,13 @@
- Output: :math:`(N, \texttt{n\_classes})`
"""
-
head_output = self.head(input)
return self._get_full_log_prob(input, head_output)
def predict(self, input: Tensor) -> Tensor:
- r""" This is equivalent to `self.log_prob(input).argmax(dim=1)`,
- but is more efficient in some cases.
+ r"""Return the class with the highest probability for each example in the input minibatch.
+
+ This is equivalent to ``self.log_prob(input).argmax(dim=1)``, but is more efficient in some cases.
Args:
input (Tensor): a minibatch of examples
@@ -293,7 +293,6 @@
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N)`
"""
-
head_output = self.head(input)
output = torch.argmax(head_output, dim=1)
not_in_shortlist = (output >= self.shortlist_size)
diff --git a/torch/nn/modules/channelshuffle.py b/torch/nn/modules/channelshuffle.py
index b30391f..a950868 100644
--- a/torch/nn/modules/channelshuffle.py
+++ b/torch/nn/modules/channelshuffle.py
@@ -6,8 +6,10 @@
__all__ = ['ChannelShuffle']
class ChannelShuffle(Module):
- r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`
- into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`,
+ r"""Divides and rearranges the channels in a tensor.
+
+ This operation divides the channels in a tensor of shape :math:`(*, C , H, W)`
+ into g groups and rearranges them as :math:`(*, \frac{C}{g}, g, H, W)`,
while keeping the original tensor shape.
Args:
@@ -40,6 +42,7 @@
[15, 16]],
]]
"""
+
__constants__ = ['groups']
groups: int
diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py
index cb018d9..3f89453 100644
--- a/torch/nn/modules/conv.py
+++ b/torch/nn/modules/conv.py
@@ -1189,9 +1189,9 @@
# LazyConv1d defines weight as a Tensor but derived class defines it as UnitializeParameter
class LazyConv1d(_LazyConvXdMixin, Conv1d): # type: ignore[misc]
- r"""A :class:`torch.nn.Conv1d` module with lazy initialization of
- the ``in_channels`` argument of the :class:`Conv1d` that is inferred from
- the ``input.size(1)``.
+ r"""A :class:`torch.nn.Conv1d` module with lazy initialization of the ``in_channels`` argument.
+
+ The ``in_channels`` argument of the :class:`Conv1d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
@@ -1258,9 +1258,9 @@
# LazyConv2d defines weight as a Tensor but derived class defines it as UnitializeParameter
class LazyConv2d(_LazyConvXdMixin, Conv2d): # type: ignore[misc]
- r"""A :class:`torch.nn.Conv2d` module with lazy initialization of
- the ``in_channels`` argument of the :class:`Conv2d` that is inferred from
- the ``input.size(1)``.
+ r"""A :class:`torch.nn.Conv2d` module with lazy initialization of the ``in_channels`` argument.
+
+ The ``in_channels`` argument of the :class:`Conv2d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
@@ -1327,8 +1327,9 @@
# LazyConv3d defines weight as a Tensor but derived class defines it as UnitializeParameter
class LazyConv3d(_LazyConvXdMixin, Conv3d): # type: ignore[misc]
- r"""A :class:`torch.nn.Conv3d` module with lazy initialization of
- the ``in_channels`` argument of the :class:`Conv3d` that is inferred from
+ r"""A :class:`torch.nn.Conv3d` module with lazy initialization of the ``in_channels`` argument.
+
+ The ``in_channels`` argument of the :class:`Conv3d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
@@ -1396,8 +1397,9 @@
# LazyConvTranspose1d defines weight as a Tensor but derived class defines it as UnitializeParameter
class LazyConvTranspose1d(_LazyConvXdMixin, ConvTranspose1d): # type: ignore[misc]
- r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of
- the ``in_channels`` argument of the :class:`ConvTranspose1d` that is inferred from
+ r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of the ``in_channels`` argument.
+
+ The ``in_channels`` argument of the :class:`ConvTranspose1d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
@@ -1464,8 +1466,9 @@
# LazyConvTranspose2d defines weight as a Tensor but derived class defines it as UnitializeParameter
class LazyConvTranspose2d(_LazyConvXdMixin, ConvTranspose2d): # type: ignore[misc]
- r"""A :class:`torch.nn.ConvTranspose2d` module with lazy initialization of
- the ``in_channels`` argument of the :class:`ConvTranspose2d` that is inferred from
+ r"""A :class:`torch.nn.ConvTranspose2d` module with lazy initialization of the ``in_channels`` argument.
+
+ The ``in_channels`` argument of the :class:`ConvTranspose2d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
@@ -1532,8 +1535,9 @@
# LazyConvTranspose3d defines weight as a Tensor but derived class defines it as UnitializeParameter
class LazyConvTranspose3d(_LazyConvXdMixin, ConvTranspose3d): # type: ignore[misc]
- r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of
- the ``in_channels`` argument of the :class:`ConvTranspose3d` that is inferred from
+ r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of the ``in_channels`` argument.
+
+ The ``in_channels`` argument of the :class:`ConvTranspose3d` is inferred from
the ``input.size(1)``.
The attributes that will be lazily initialized are `weight` and `bias`.
diff --git a/torch/nn/modules/distance.py b/torch/nn/modules/distance.py
index 83478a2..cbf9866 100644
--- a/torch/nn/modules/distance.py
+++ b/torch/nn/modules/distance.py
@@ -38,6 +38,7 @@
>>> input2 = torch.randn(100, 128)
>>> output = pdist(input1, input2)
"""
+
__constants__ = ['norm', 'eps', 'keepdim']
norm: float
eps: float
@@ -74,6 +75,7 @@
>>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
>>> output = cos(input1, input2)
"""
+
__constants__ = ['dim', 'eps']
dim: int
eps: float
diff --git a/torch/nn/modules/dropout.py b/torch/nn/modules/dropout.py
index d78d359..f4e1518 100644
--- a/torch/nn/modules/dropout.py
+++ b/torch/nn/modules/dropout.py
@@ -22,10 +22,11 @@
class Dropout(_DropoutNd):
- r"""During training, randomly zeroes some of the elements of the input
- tensor with probability :attr:`p` using samples from a Bernoulli
- distribution. Each channel will be zeroed out independently on every forward
- call.
+ r"""During training, randomly zeroes some of the elements of the input tensor with probability :attr:`p`.
+
+ The zeroed elements are chosen independently for each forward call and are sampled from a Bernoulli distribution.
+
+ Each channel will be zeroed out independently on every forward call.
This has proven to be an effective technique for regularization and
preventing the co-adaptation of neurons as described in the paper
@@ -59,9 +60,12 @@
class Dropout1d(_DropoutNd):
- r"""Randomly zero out entire channels (a channel is a 1D feature map,
+ r"""Randomly zero out entire channels.
+
+ A channel is a 1D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
- batched input is a 1D tensor :math:`\text{input}[i, j]`).
+ batched input is a 1D tensor :math:`\text{input}[i, j]`.
+
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
@@ -101,9 +105,12 @@
class Dropout2d(_DropoutNd):
- r"""Randomly zero out entire channels (a channel is a 2D feature map,
+ r"""Randomly zero out entire channels.
+
+ A channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
- batched input is a 2D tensor :math:`\text{input}[i, j]`).
+ batched input is a 2D tensor :math:`\text{input}[i, j]`.
+
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
@@ -150,9 +157,12 @@
class Dropout3d(_DropoutNd):
- r"""Randomly zero out entire channels (a channel is a 3D feature map,
+ r"""Randomly zero out entire channels.
+
+ A channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
- batched input is a 3D tensor :math:`\text{input}[i, j]`).
+ batched input is a 3D tensor :math:`\text{input}[i, j]`.
+
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
@@ -234,9 +244,11 @@
class FeatureAlphaDropout(_DropoutNd):
- r"""Randomly masks out entire channels (a channel is a feature map,
+ r"""Randomly masks out entire channels.
+
+ A channel is a feature map,
e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batch input
- is a tensor :math:`\text{input}[i, j]`) of the input tensor). Instead of
+ is a tensor :math:`\text{input}[i, j]` of the input tensor. Instead of
setting activations to zero, as in regular Dropout, the activations are set
to the negative saturation value of the SELU activation function. More details
can be found in the paper `Self-Normalizing Neural Networks`_ .
diff --git a/torch/nn/modules/flatten.py b/torch/nn/modules/flatten.py
index 5938461..eaf62d5 100644
--- a/torch/nn/modules/flatten.py
+++ b/torch/nn/modules/flatten.py
@@ -8,8 +8,9 @@
class Flatten(Module):
r"""
- Flattens a contiguous range of dims into a tensor. For use with :class:`~nn.Sequential`.
- See :meth:`torch.flatten` for details.
+ Flattens a contiguous range of dims into a tensor.
+
+ For use with :class:`~nn.Sequential`; see :meth:`torch.flatten` for details.
Shape:
- Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,'
@@ -34,6 +35,7 @@
>>> output.size()
torch.Size([160, 5])
"""
+
__constants__ = ['start_dim', 'end_dim']
start_dim: int
end_dim: int
@@ -96,6 +98,7 @@
>>> output.size()
torch.Size([2, 2, 5, 5])
"""
+
NamedShape = Tuple[Tuple[str, int]]
__constants__ = ['dim', 'unflattened_size']
diff --git a/torch/nn/modules/fold.py b/torch/nn/modules/fold.py
index 2d14a9b..8ae9112 100644
--- a/torch/nn/modules/fold.py
+++ b/torch/nn/modules/fold.py
@@ -7,8 +7,7 @@
__all__ = ['Fold', 'Unfold']
class Fold(Module):
- r"""Combines an array of sliding local blocks into a large containing
- tensor.
+ r"""Combines an array of sliding local blocks into a large containing tensor.
Consider a batched :attr:`input` tensor containing sliding local blocks,
e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`,
@@ -118,6 +117,7 @@
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
+
__constants__ = ['output_size', 'kernel_size', 'dilation', 'padding',
'stride']
output_size: _size_any_t
@@ -274,6 +274,7 @@
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
+
__constants__ = ['kernel_size', 'dilation', 'padding', 'stride']
kernel_size: _size_any_t
dilation: _size_any_t
diff --git a/torch/nn/modules/instancenorm.py b/torch/nn/modules/instancenorm.py
index fbfbd56..d0c37b7 100644
--- a/torch/nn/modules/instancenorm.py
+++ b/torch/nn/modules/instancenorm.py
@@ -88,8 +88,10 @@
class InstanceNorm1d(_InstanceNorm):
- r"""Applies Instance Normalization over a 2D (unbatched) or 3D (batched) input
- as described in the paper
+ r"""Applies Instance Normalization.
+
+ This operation applies Instance Normalization
+ over a 2D (unbatched) or 3D (batched) input as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
@@ -163,11 +165,10 @@
class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
- r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of
- the ``num_features`` argument of the :class:`InstanceNorm1d` that is inferred
- from the ``input.size(1)``.
- The attributes that will be lazily initialized are `weight`, `bias`,
- `running_mean` and `running_var`.
+ r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument.
+
+ The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from the ``input.size(1)``.
+ The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
@@ -201,7 +202,10 @@
class InstanceNorm2d(_InstanceNorm):
- r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
+ r"""Applies Instance Normalization.
+
+ This operation applies Instance Normalization
+ over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
@@ -277,9 +281,9 @@
class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
- r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of
- the ``num_features`` argument of the :class:`InstanceNorm2d` that is inferred
- from the ``input.size(1)``.
+ r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument.
+
+ The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
@@ -315,8 +319,10 @@
class InstanceNorm3d(_InstanceNorm):
- r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
- with additional channel dimension) as described in the paper
+ r"""Applies Instance Normalization.
+
+ This operation applies Instance Normalization
+ over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`__.
@@ -391,9 +397,9 @@
class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
- r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of
- the ``num_features`` argument of the :class:`InstanceNorm3d` that is inferred
- from the ``input.size(1)``.
+ r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument.
+
+ The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from the ``input.size(1)``.
The attributes that will be lazily initialized are `weight`, `bias`,
`running_mean` and `running_var`.
diff --git a/torch/nn/modules/lazy.py b/torch/nn/modules/lazy.py
index e436e37..2c033df 100644
--- a/torch/nn/modules/lazy.py
+++ b/torch/nn/modules/lazy.py
@@ -8,10 +8,11 @@
__all__ = ['LazyModuleMixin']
class _LazyProtocol(Protocol):
- """This is to avoid errors with mypy checks for
- The attributes in a mixin:
+ """This class is used to avoid errors with mypy checks for the attributes in a mixin.
+
https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes
"""
+
def _register_load_state_dict_pre_hook(self, hook):
...
@@ -51,7 +52,7 @@
class LazyModuleMixin:
- r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules."
+ r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules".
.. warning:
Lazy modules are an experimental new feature under active development,
@@ -220,14 +221,14 @@
def initialize_parameters(self: _LazyProtocol, *args, **kwargs):
r"""Initialize parameters according to the input batch properties.
+
This adds an interface to isolate parameter initialization from the
forward pass when doing parameter shape inference.
"""
raise NotImplementedError(f'initialize_parameters is not implemented for {self.__class__.__name__}')
def has_uninitialized_params(self: _LazyProtocol):
- r"""Check if a module has parameters that are not initialized
- """
+ r"""Check if a module has parameters that are not initialized."""
# This is to avoid the JIT to track this parameter and force
# custom modules __setstate__ to add it
params = self._parameters.values()
@@ -238,8 +239,8 @@
return False
def _infer_parameters(self: _LazyProtocol, module, args, kwargs=None):
- r"""Infers the size and initializes the parameters according to the
- provided input batch.
+ r"""Infers the size and initializes the parameters according to the provided input batch.
+
Given a module that contains parameters that were declared inferrable
using :class:`torch.nn.parameter.ParameterMode.Infer`, runs a forward pass
in the complete module using the provided input to initialize all the parameters
diff --git a/torch/nn/modules/linear.py b/torch/nn/modules/linear.py
index 03b641f..83e1b8a 100644
--- a/torch/nn/modules/linear.py
+++ b/torch/nn/modules/linear.py
@@ -38,6 +38,7 @@
torch.Size([128, 20])
"""
+
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__()
@@ -46,7 +47,7 @@
class Linear(Module):
- r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
+ r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
@@ -82,6 +83,7 @@
>>> print(output.size())
torch.Size([128, 30])
"""
+
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
@@ -130,8 +132,7 @@
class Bilinear(Module):
- r"""Applies a bilinear transformation to the incoming data:
- :math:`y = x_1^T A x_2 + b`
+ r"""Applies a bilinear transformation to the incoming data: :math:`y = x_1^T A x_2 + b`.
Args:
in1_features: size of each first input sample
@@ -167,6 +168,7 @@
>>> print(output.size())
torch.Size([128, 40])
"""
+
__constants__ = ['in1_features', 'in2_features', 'out_features']
in1_features: int
in2_features: int
diff --git a/torch/nn/modules/normalization.py b/torch/nn/modules/normalization.py
index 8a2211e..6502ec2 100644
--- a/torch/nn/modules/normalization.py
+++ b/torch/nn/modules/normalization.py
@@ -12,8 +12,9 @@
__all__ = ['LocalResponseNorm', 'CrossMapLRN2d', 'LayerNorm', 'GroupNorm']
class LocalResponseNorm(Module):
- r"""Applies local response normalization over an input signal composed
- of several input planes, where channels occupy the second dimension.
+ r"""Applies local response normalization over an input signal.
+
+ The input signal is composed of several input planes, where channels occupy the second dimension.
Applies normalization across channels.
.. math::
@@ -39,6 +40,7 @@
>>> output_4d = lrn(signal_4d)
"""
+
__constants__ = ['size', 'alpha', 'beta', 'k']
size: int
alpha: float
@@ -85,7 +87,9 @@
class LayerNorm(Module):
- r"""Applies Layer Normalization over a mini-batch of inputs as described in
+ r"""Applies Layer Normalization over a mini-batch of inputs.
+
+ This layer implements the operation as described in
the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__
.. math::
@@ -159,6 +163,7 @@
:scale: 50 %
"""
+
__constants__ = ['normalized_shape', 'eps', 'elementwise_affine']
normalized_shape: Tuple[int, ...]
eps: float
@@ -202,7 +207,9 @@
class GroupNorm(Module):
- r"""Applies Group Normalization over a mini-batch of inputs as described in
+ r"""Applies Group Normalization over a mini-batch of inputs.
+
+ This layer implements the operation as described in
the paper `Group Normalization <https://arxiv.org/abs/1803.08494>`__
.. math::
@@ -244,6 +251,7 @@
>>> # Activating the module
>>> output = m(input)
"""
+
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine']
num_groups: int
num_channels: int
diff --git a/torch/nn/modules/padding.py b/torch/nn/modules/padding.py
index c8edf07..9401089 100644
--- a/torch/nn/modules/padding.py
+++ b/torch/nn/modules/padding.py
@@ -65,8 +65,8 @@
>>> m(input)
tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
[5., 6., 7., 4., 5., 6., 7., 4.]]])
-
"""
+
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t) -> None:
@@ -126,8 +126,8 @@
[2., 0., 1., 2., 0.],
[5., 3., 4., 5., 3.],
[8., 6., 7., 8., 6.]]]])
-
"""
+
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
@@ -177,8 +177,8 @@
>>> # using different paddings for different sides
>>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
>>> output = m(input)
-
"""
+
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
@@ -250,8 +250,8 @@
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000],
[ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]])
-
"""
+
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t, value: float):
@@ -300,8 +300,8 @@
[ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],
[ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
-
"""
+
__constants__ = ['padding', 'value']
padding: Tuple[int, int, int, int]
@@ -341,8 +341,8 @@
>>> # using different paddings for different sides
>>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
>>> output = m(input)
-
"""
+
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t, value: float) -> None:
@@ -393,8 +393,8 @@
>>> m(input)
tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
[7., 6., 5., 4., 5., 6., 7., 6.]]])
-
"""
+
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t) -> None:
@@ -445,8 +445,8 @@
[1., 0., 1., 2., 1.],
[4., 3., 4., 5., 4.],
[7., 6., 7., 8., 7.]]]])
-
"""
+
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
@@ -500,6 +500,7 @@
[3., 2., 3., 2.],
[1., 0., 1., 0.]]]]])
"""
+
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
@@ -550,8 +551,8 @@
>>> m(input)
tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
[4., 4., 4., 4., 5., 6., 7., 7.]]])
-
"""
+
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t) -> None:
@@ -602,8 +603,8 @@
[0., 0., 1., 2., 2.],
[3., 3., 4., 5., 5.],
[6., 6., 7., 8., 8.]]]])
-
"""
+
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
@@ -643,8 +644,8 @@
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
>>> output = m(input)
-
"""
+
padding: Tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
@@ -694,8 +695,8 @@
>>> m(input)
tensor([[[ 0.0000, 0.0000, 0.0000, 1.6616, 1.4523, -1.1255, 0.0000],
[ 0.0000, 0.0000, 0.0000, -3.6372, 0.1182, -1.8652, 0.0000]]])
-
"""
+
padding: Tuple[int, int]
def __init__(self, padding: _size_2_t) -> None:
@@ -747,8 +748,8 @@
[ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000],
[ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000],
[ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]])
-
"""
+
padding: Tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
@@ -788,7 +789,6 @@
>>> # using different paddings for different sides
>>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
>>> output = m(input)
-
"""
padding: Tuple[int, int, int, int, int, int]