| commit | bd5b4e6504bf487c313d0b85100242898ad85c8d | |
|---|---|---|
| author | Rohan Varma <rvarm1@fb.com> | Wed Nov 02 16:31:16 2022 +0000 |
| committer | PyTorch MergeBot <pytorchmergebot@users.noreply.github.com> | Wed Nov 02 16:31:16 2022 +0000 |
| tree | 522bb019019284646604a94e8cf730dd1b9c970b | |
| parent | 7382c88df2889bf58ef62fe52ed3e1361e384811 | |
[Easy] Unused var in functional_adam (#88292)

Fixes #ISSUE_NUMBER
Pull Request resolved: https://github.com/pytorch/pytorch/pull/88292
Approved by: https://github.com/awgu
diff --git a/torch/distributed/optim/functional_adam.py b/torch/distributed/optim/functional_adam.py
index 72001f1..92b749a 100644
--- a/torch/distributed/optim/functional_adam.py
+++ b/torch/distributed/optim/functional_adam.py
@@ -66,7 +66,6 @@
         Similar to step, but operates on a single parameter and optionally a
         gradient tensor.
         """
-        params = [param]
         params_with_grad = []
         grads = []
         exp_avgs = []
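For context, the removed line built a single-element list that the per-parameter update path never read, which is why it could be deleted without changing behavior. The sketch below is a minimal, simplified illustration of what a `step_param`-style single-tensor Adam update computes; `step_param_sketch`, its explicit `state` dict layout, and the default hyperparameters are illustrative assumptions, not the actual `_FunctionalAdam` implementation in `functional_adam.py`.

```python
# Minimal sketch (NOT the actual PyTorch implementation) of a per-parameter
# Adam step in the style of _FunctionalAdam.step_param. The state dict layout
# and hyperparameter defaults here are illustrative assumptions.
from typing import Dict, Optional, Tuple

import torch
from torch import Tensor


def step_param_sketch(
    param: Tensor,
    grad: Optional[Tensor],
    state: Dict[str, Tensor],
    lr: float = 1e-3,
    betas: Tuple[float, float] = (0.9, 0.999),
    eps: float = 1e-8,
) -> None:
    """Apply one Adam update to a single parameter, in place."""
    if grad is None:
        return  # nothing to do without a gradient
    # A line like `params = [param]` here would be dead code: the
    # single-tensor update below never reads such a list, which is
    # what #88292 removed from the real implementation.
    if not state:
        state["step"] = torch.zeros((), dtype=torch.float32)
        state["exp_avg"] = torch.zeros_like(param)
        state["exp_avg_sq"] = torch.zeros_like(param)

    beta1, beta2 = betas
    state["step"] += 1
    step = state["step"].item()

    # Update biased first and second moment estimates.
    state["exp_avg"].mul_(beta1).add_(grad, alpha=1 - beta1)
    state["exp_avg_sq"].mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

    # Bias corrections and in-place parameter update.
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    denom = (state["exp_avg_sq"] / bias_correction2).sqrt().add_(eps)
    step_size = lr / bias_correction1
    param.addcdiv_(state["exp_avg"], denom, value=-step_size)


# Usage example on a toy tensor:
if __name__ == "__main__":
    p = torch.ones(3)
    g = torch.full((3,), 0.5)
    s: Dict[str, Tensor] = {}
    step_param_sketch(p, g, s)
    print(p)  # parameter nudged opposite the gradient direction
```

Because the update operates directly on the single tensor passed in, wrapping it in a throwaway list adds nothing, which is the point of this cleanup commit.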