Fix Typing Error for Padding with asymmetric signatures (#24895)
Summary:
This PR resolves https://github.com/pytorch/pytorch/issues/24806. The N-d padding modules accept asymmetric padding given as a tuple of 2*N ints (e.g. ConstantPad2d takes `(left, right, top, bottom)`), but the type stubs annotated `padding` with the N-element size types, so type checkers rejected valid calls. This change adds `_size_4_t`, `_size_5_t`, and `_size_6_t` aliases to `common_types.pyi` and updates `padding.pyi.in` so each module's `padding` annotation allows a scalar or a tuple of the length the module actually accepts.
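For illustration (not part of the diff): the modules themselves have always accepted the asymmetric form; only the stubs rejected it. A minimal repro of the behavior the new annotations describe:

import torch
import torch.nn as nn

# ConstantPad2d takes 2*N = 4 padding values: (left, right, top, bottom).
# The old stub typed `padding` as _size_2_t (an int or a 2-tuple), so type
# checkers flagged this valid call.
pad = nn.ConstantPad2d((1, 2, 3, 4), value=0.0)
out = pad(torch.randn(1, 3, 8, 8))
print(out.shape)  # torch.Size([1, 3, 15, 11]): height 8+3+4, width 8+1+2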
Pull Request resolved: https://github.com/pytorch/pytorch/pull/24895
Differential Revision: D16925208
Pulled By: ezyang
fbshipit-source-id: f4a374ca86e2e99faa30ca4b41c681e9976fe2de
diff --git a/torch/nn/common_types.pyi b/torch/nn/common_types.pyi
index 4363011..fa9d5bb 100644
--- a/torch/nn/common_types.pyi
+++ b/torch/nn/common_types.pyi
@@ -11,12 +11,18 @@
 _scalar_or_tuple_1_t = Union[T, Tuple[T]]
 _scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
 _scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]]
+_scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]
+_scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]]
+_scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]]
 
 # For arguments which represent size parameters (eg, kernel size, padding)
 _size_any_t = _scalar_or_tuple_any_t[int]
 _size_1_t = _scalar_or_tuple_1_t[int]
 _size_2_t = _scalar_or_tuple_2_t[int]
 _size_3_t = _scalar_or_tuple_3_t[int]
+_size_4_t = _scalar_or_tuple_4_t[int]
+_size_5_t = _scalar_or_tuple_5_t[int]
+_size_6_t = _scalar_or_tuple_6_t[int]
 
 # For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters)
 _ratio_2_t = _scalar_or_tuple_2_t[float]
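Aside (illustrative, not part of the diff): the `_scalar_or_tuple_N_t` aliases are generic in `T`, so subscripting with `int` yields the concrete size types used below. A minimal sketch, assuming the same TypeVar `T` declared at the top of common_types.pyi:

from typing import Tuple, TypeVar, Union

T = TypeVar('T')
_scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]

# Subscripting substitutes T, so an annotation of _size_4_t accepts either
# a bare int or a 4-tuple of ints:
_size_4_t = _scalar_or_tuple_4_t[int]  # Union[int, Tuple[int, int, int, int]]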
diff --git a/torch/nn/modules/padding.pyi.in b/torch/nn/modules/padding.pyi.in
index 388be5d..a04130c 100644
--- a/torch/nn/modules/padding.pyi.in
+++ b/torch/nn/modules/padding.pyi.in
@@ -1,6 +1,6 @@
 from .module import Module
 from ... import Tensor
-from ..common_types import _size_1_t, _size_2_t, _size_3_t
+from ..common_types import _size_2_t, _size_4_t, _size_6_t
 
 
 class _ConstantPadNd(Module):
@@ -12,21 +12,21 @@
 class ConstantPad1d(_ConstantPadNd):
-    padding: _size_1_t = ...
-
-    def __init__(self, padding: _size_1_t, value: float) -> None: ...
-
-
-class ConstantPad2d(_ConstantPadNd):
     padding: _size_2_t = ...
 
     def __init__(self, padding: _size_2_t, value: float) -> None: ...
 
 
-class ConstantPad3d(_ConstantPadNd):
-    padding: _size_3_t = ...
+class ConstantPad2d(_ConstantPadNd):
+    padding: _size_4_t = ...
 
-    def __init__(self, padding: _size_3_t, value: float) -> None: ...
+    def __init__(self, padding: _size_4_t, value: float) -> None: ...
+
+
+class ConstantPad3d(_ConstantPadNd):
+    padding: _size_6_t = ...
+
+    def __init__(self, padding: _size_6_t, value: float) -> None: ...
 
 
 class _ReflectionPadNd(Module):
@@ -36,17 +36,17 @@
 class ReflectionPad1d(_ReflectionPadNd):
-    padding: _size_1_t = ...
-
-    def __init__(self, padding: _size_1_t) -> None: ...
-
-
-class ReflectionPad2d(_ReflectionPadNd):
     padding: _size_2_t = ...
 
     def __init__(self, padding: _size_2_t) -> None: ...
+
+
+class ReflectionPad2d(_ReflectionPadNd):
+    padding: _size_4_t = ...
+
+    def __init__(self, padding: _size_4_t) -> None: ...
 
 
 class _ReplicationPadNd(Module):
     def forward(self, input: Tensor) -> Tensor: ...
@@ -54,24 +54,24 @@
 class ReplicationPad1d(_ReplicationPadNd):
-    padding: _size_1_t = ...
+    padding: _size_2_t = ...
 
-    def __init__(self, padding: _size_1_t) -> None: ...
+    def __init__(self, padding: _size_2_t) -> None: ...
 
 
 class ReplicationPad2d(_ReplicationPadNd):
-    padding: _size_2_t = ...
+    padding: _size_4_t = ...
 
-    def __init__(self, padding: _size_2_t) -> None: ...
+    def __init__(self, padding: _size_4_t) -> None: ...
 
 
 class ReplicationPad3d(_ReplicationPadNd):
-    padding: _size_3_t = ...
+    padding: _size_6_t = ...
 
-    def __init__(self, padding: _size_3_t) -> None: ...
+    def __init__(self, padding: _size_6_t) -> None: ...
 
 
 class ZeroPad2d(ConstantPad2d):
-    padding: _size_2_t = ...
+    padding: _size_4_t = ...
 
-    def __init__(self, padding: _size_2_t) -> None: ...
+    def __init__(self, padding: _size_4_t) -> None: ...
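Illustrative check (not part of the diff): with the updated stubs, the asymmetric tuple forms now type-check, and the scalar shorthand still does:

import torch.nn as nn

nn.ConstantPad1d((2, 3), value=1.0)      # 2-tuple: (left, right)
nn.ReflectionPad2d((0, 1, 0, 1))         # 4-tuple: (left, right, top, bottom)
nn.ReplicationPad3d((1, 1, 2, 2, 0, 0))  # 6-tuple: (left, right, top, bottom, front, back)
nn.ZeroPad2d(2)                          # a bare int still pads all sides equally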