| from .module import Module |
| from .. import functional as F |
| |
| |
| class Dropout(Module): |
| r"""During training, randomly zeroes some of the elements of the input |
    tensor with probability *p* using samples from a Bernoulli distribution.
| The elements to zero are randomized on every forward call. |
| |
| This has proven to be an effective technique for regularization and |
| preventing the co-adaptation of neurons as described in the paper |
| `Improving neural networks by preventing co-adaptation of feature |
| detectors`_ . |
| |
    Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}`
    during training. This means that during evaluation the module simply
    computes an identity function.
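
    For example, with ``p = 0.2`` an element survives with probability 0.8,
    and a survivor is multiplied by ``1 / (1 - 0.2) = 1.25``, so the expected
    value of every element is unchanged between training and evaluation. A
    rough sketch of this "inverted dropout" computation (the mask construction
    here is illustrative, not the actual implementation)::

        >>> p = 0.2
        >>> x = torch.randn(20, 16)
        >>> mask = (torch.rand(x.size()) > p).float()  # 0 with probability p
        >>> y = x * mask / (1 - p)                     # survivors scaled by 1/(1-p)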
| |
| Args: |
        p (float, optional): probability of an element to be zeroed. Default: 0.5
        inplace (bool, optional): If set to ``True``, will do this operation
            in-place. Default: ``False``
| |
| Shape: |
| - Input: `Any`. Input can be of any shape |
| - Output: `Same`. Output is of the same shape as input |
| |
| Examples:: |
| |
| >>> m = nn.Dropout(p=0.2) |
| >>> input = autograd.Variable(torch.randn(20, 16)) |
| >>> output = m(input) |
| |
| .. _Improving neural networks by preventing co-adaptation of feature detectors: https://arxiv.org/abs/1207.0580 |
| """ |
| |
| def __init__(self, p=0.5, inplace=False): |
| super(Dropout, self).__init__() |
| if p < 0 or p > 1: |
| raise ValueError("dropout probability has to be between 0 and 1, " |
| "but got {}".format(p)) |
| self.p = p |
| self.inplace = inplace |
| |
| def forward(self, input): |
| return F.dropout(input, self.p, self.training, self.inplace) |
| |
| def __repr__(self): |
| inplace_str = ', inplace' if self.inplace else '' |
        return self.__class__.__name__ + ' (' \
            + 'p=' + str(self.p) \
            + inplace_str + ')'
| |
| |
| class Dropout2d(Module): |
| r"""Randomly zeroes whole channels of the input tensor. |
    The channels to zero out are randomized on every forward call.
| |
| *Usually the input comes from Conv2d modules.* |
| |
| As described in the paper |
| `Efficient Object Localization Using Convolutional Networks`_ , |
| if adjacent pixels within feature maps are strongly correlated |
    (as is normally the case in early convolution layers) then i.i.d. dropout
    will not regularize the activations and will instead just result
| in an effective learning rate decrease. |
| |
    In this case, :class:`nn.Dropout2d` will help promote independence between
| feature maps and should be used instead. |
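
    Conceptually, the per-channel mask has shape :math:`(N, C, 1, 1)` and is
    broadcast over the spatial dimensions, so each feature map is kept or
    zeroed as a whole. A rough sketch (illustrative, not the actual
    implementation)::

        >>> p = 0.2
        >>> x = torch.randn(20, 16, 32, 32)
        >>> mask = (torch.rand(20, 16, 1, 1) > p).float().expand_as(x)
        >>> y = x * mask / (1 - p)   # whole (H, W) maps are zeroed together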
| |
| Args: |
        p (float, optional): probability of an element to be zeroed.
            Default: 0.5
        inplace (bool, optional): If set to ``True``, will do this operation
            in-place. Default: ``False``
| |
| Shape: |
| - Input: :math:`(N, C, H, W)` |
| - Output: :math:`(N, C, H, W)` (same shape as input) |
| |
| Examples:: |
| |
| >>> m = nn.Dropout2d(p=0.2) |
| >>> input = autograd.Variable(torch.randn(20, 16, 32, 32)) |
| >>> output = m(input) |
| |
| .. _Efficient Object Localization Using Convolutional Networks: |
| http://arxiv.org/abs/1411.4280 |
| """ |
| |
| def __init__(self, p=0.5, inplace=False): |
| super(Dropout2d, self).__init__() |
| if p < 0 or p > 1: |
| raise ValueError("dropout probability has to be between 0 and 1, " |
| "but got {}".format(p)) |
| self.p = p |
| self.inplace = inplace |
| |
| def forward(self, input): |
| return self._backend.Dropout2d(self.p, self.training, self.inplace)(input) |
| |
| def __repr__(self): |
| inplace_str = ', inplace' if self.inplace else '' |
| return self.__class__.__name__ + ' (' \ |
| + 'p=' + str(self.p) \ |
| + inplace_str + ')' |
| |
| |
| class Dropout3d(Module): |
| r"""Randomly zeroes whole channels of the input tensor. |
| The channels to zero are randomized on every forward call. |
| |
| *Usually the input comes from Conv3d modules.* |
| |
| As described in the paper |
| `Efficient Object Localization Using Convolutional Networks`_ , |
| if adjacent pixels within feature maps are strongly correlated |
    (as is normally the case in early convolution layers) then i.i.d. dropout
    will not regularize the activations and will instead just result
| in an effective learning rate decrease. |
| |
    In this case, :class:`nn.Dropout3d` will help promote independence between
| feature maps and should be used instead. |
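
    Here the per-channel mask has shape :math:`(N, C, 1, 1, 1)` and is
    broadcast over all three spatial dimensions (illustrative, not the actual
    implementation)::

        >>> p = 0.2
        >>> x = torch.randn(20, 16, 4, 32, 32)
        >>> mask = (torch.rand(20, 16, 1, 1, 1) > p).float().expand_as(x)
        >>> y = x * mask / (1 - p)   # whole (D, H, W) volumes are zeroed together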
| |
| Args: |
        p (float, optional): probability of an element to be zeroed.
            Default: 0.5
        inplace (bool, optional): If set to ``True``, will do this operation
            in-place. Default: ``False``
| |
| Shape: |
| - Input: :math:`(N, C, D, H, W)` |
| - Output: :math:`(N, C, D, H, W)` (same shape as input) |
| |
| Examples:: |
| |
| >>> m = nn.Dropout3d(p=0.2) |
| >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32)) |
| >>> output = m(input) |
| |
| .. _Efficient Object Localization Using Convolutional Networks: |
| http://arxiv.org/abs/1411.4280 |
| """ |
| |
| def __init__(self, p=0.5, inplace=False): |
| super(Dropout3d, self).__init__() |
| if p < 0 or p > 1: |
| raise ValueError("dropout probability has to be between 0 and 1, " |
| "but got {}".format(p)) |
| self.p = p |
| self.inplace = inplace |
| |
| def forward(self, input): |
| return self._backend.Dropout3d(self.p, self.training, self.inplace)(input) |
| |
| def __repr__(self): |
| inplace_str = ', inplace' if self.inplace else '' |
| return self.__class__.__name__ + ' (' \ |
| + 'p=' + str(self.p) \ |
| + inplace_str + ')' |
| |
| |
| class AlphaDropout(Module): |
| r"""Applies Alpha Dropout over the input. |
| |
| Alpha Dropout is a type of Dropout that maintains the self-normalizing |
| property. |
| For an input with zero mean and unit standard deviation, the output of |
| Alpha Dropout maintains the original mean and standard deviation of the |
| input. |
    Alpha Dropout goes hand in hand with the SELU activation function, which
    ensures that the outputs have zero mean and unit standard deviation.
| |
    During training, it randomly masks some of the elements of the input
    tensor with probability *p* using samples from a Bernoulli distribution.
    The elements to be masked are randomized on every forward call, and the
    outputs are scaled and shifted to maintain zero mean and unit standard
    deviation.
| |
| During evaluation the module simply computes an identity function. |
| |
| More details can be found in the paper `Self-Normalizing Neural Networks`_ . |
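
    Concretely, masked elements are set to the negative saturation value of
    SELU, :math:`\alpha' = -\lambda\alpha \approx -1.7581`, and the whole
    tensor is then affinely transformed to restore zero mean and unit
    variance. A rough sketch following the formulas from the paper
    (illustrative, not the actual implementation)::

        >>> p, alpha_p = 0.2, -1.7580993408473766
        >>> x = torch.randn(20, 16)
        >>> dropped = torch.rand(x.size()) < p            # 1 where masked
        >>> y = x.clone().masked_fill_(dropped, alpha_p)  # masked -> alpha'
        >>> q = 1 - p
        >>> a = (q + alpha_p ** 2 * q * p) ** -0.5        # variance correction
        >>> y = y * a - a * alpha_p * p                   # mean correction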
| |
| Args: |
| p (float): probability of an element to be dropped. Default: 0.5 |
| |
| Shape: |
| - Input: `Any`. Input can be of any shape |
| - Output: `Same`. Output is of the same shape as input |
| |
| Examples:: |
| |
| >>> m = nn.AlphaDropout(p=0.2) |
| >>> input = autograd.Variable(torch.randn(20, 16)) |
| >>> output = m(input) |
| |
| .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515 |
| """ |
| |
| def __init__(self, p=0.5): |
| super(AlphaDropout, self).__init__() |
| if p < 0 or p > 1: |
| raise ValueError("dropout probability has to be between 0 and 1, " |
| "but got {}".format(p)) |
| self.p = p |
| |
| def forward(self, input): |
| return F.alpha_dropout(input, self.p, self.training) |
| |
| def __repr__(self): |
        return self.__class__.__name__ + ' (' \
            + 'p=' + str(self.p) + ')'