Remove beta defaulting in smooth_l1_loss_backward; added to the BC whitelist (#45588)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/45588
Test Plan: Imported from OSS
Reviewed By: mrshenli
Differential Revision: D24024312
Pulled By: bdhirsh
fbshipit-source-id: 7246e5da741fbc5641deecaf057ae9a6e44e8c34
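
For context, beta is the threshold at which smooth L1 loss switches from a quadratic to a linear penalty, so it appears directly in the gradient that smooth_l1_loss_backward computes. Below is a minimal reference sketch of that elementwise gradient (reduction='none' and beta > 0 assumed); it is only an illustration, not the kernel touched by this diff.

import torch

def smooth_l1_loss_backward_ref(grad_output, input, target, beta):
    # Elementwise gradient of smooth L1 loss w.r.t. input:
    #   (input - target) / beta   where |input - target| < beta
    #   sign(input - target)      otherwise
    d = input - target
    grad = torch.where(d.abs() < beta, d / beta, d.sign())
    return grad_output * grad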
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 82bf599..92a20d8 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -6795,13 +6795,13 @@
dispatch:
CPU, CUDA: smooth_l1_loss
-- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta=1.0, *, Tensor(a!) grad_input) -> Tensor(a!)
+- func: smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
python_module: nn
dispatch:
CPU: smooth_l1_loss_backward_out
CUDA: smooth_l1_loss_backward_out
-- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta=1.0) -> Tensor
+- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
use_c10_dispatcher: full
python_module: nn
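
With the default dropped from the schema, anything that calls the internal backward op directly now has to supply beta explicitly; the Python-level F.smooth_l1_loss keeps its own beta=1.0 default (unchanged here), and autograd passes the value through. A hypothetical direct call, for illustration only (reduction=1 is 'mean' in the aten encoding):

import torch

grad_out = torch.tensor(1.0)
x = torch.randn(4)
t = torch.randn(4)
# beta is now a required argument at this level
grad_in = torch.ops.aten.smooth_l1_loss_backward(grad_out, x, t, 1, 1.0)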
diff --git a/test/backward_compatibility/check_backward_compatibility.py b/test/backward_compatibility/check_backward_compatibility.py
index 0e4bcd1..a2f843d 100644
--- a/test/backward_compatibility/check_backward_compatibility.py
+++ b/test/backward_compatibility/check_backward_compatibility.py
@@ -110,6 +110,7 @@
("aten::_foreach_sub", datetime.date(2020, 10, 1)),
("aten::_amp_non_finite_check_and_unscale_", datetime.date(9999, 1, 1)),
("aten::choose_qparams_optimized", datetime.date(2020, 10, 5)),
+ ("aten::smooth_l1_loss_backward", datetime.date(2020, 10, 15)),
]
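
The new entry tells the backward-compatibility test to tolerate the changed aten::smooth_l1_loss_backward schema until the listed date. A hypothetical sketch of how an allow list like this is typically consulted (the helper below is illustrative, not the actual logic in check_backward_compatibility.py):

import datetime
import re

ALLOW_LIST = [
    ("aten::smooth_l1_loss_backward", datetime.date(2020, 10, 15)),
]

def is_allowed(schema_name, today=None):
    # Skip BC checking for ops whose allow-list entry has not expired yet.
    today = today or datetime.date.today()
    return any(expiry > today and re.match(pattern, schema_name)
               for pattern, expiry in ALLOW_LIST)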
diff --git a/tools/autograd/derivatives.yaml b/tools/autograd/derivatives.yaml
index 707baa3..92ee277 100644
--- a/tools/autograd/derivatives.yaml
+++ b/tools/autograd/derivatives.yaml
@@ -1589,7 +1589,7 @@
grad_output: replication_pad3d(grad, padding)
self: zeros_like(self)
-- name: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta=1.0) -> Tensor
+- name: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
grad_output: smooth_l1_loss_double_backward_grad_output(grad, grad_output, self, target, reduction, beta)
self: smooth_l1_loss_double_backward(grad * grad_output, self, target, reduction, beta)
target: -smooth_l1_loss_double_backward(grad * grad_output, self, target, reduction, beta)
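
This derivatives.yaml entry defines the double backward of smooth L1 loss; dropping the default keeps its signature in sync with the schema in native_functions.yaml. For reference, the second derivative of the unreduced loss w.r.t. the input is 1/beta inside the quadratic region and 0 in the linear region, which is what smooth_l1_loss_double_backward applies elementwise (the real helper also accounts for the reduction scaling). A minimal sketch, assuming beta > 0 and reduction='none':

import torch

def smooth_l1_double_backward_ref(grad, input, target, beta):
    # Second derivative of smooth L1 w.r.t. input: 1/beta where
    # |input - target| < beta, 0 in the linear region.
    d = input - target
    return grad * torch.where(d.abs() < beta,
                              torch.full_like(d, 1.0 / beta),
                              torch.zeros_like(d))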