docs: Fix a few typos (#81435)
There are small typos in the following files:
- caffe2/python/recurrent.py
- test/distributed/test_c10d_nccl.py
- test/test_fx.py
- torch/csrc/jit/runtime/autodiff.cpp
- torchgen/gen.py
Fixes:
- Should read `propagation` rather than `propogation` (caffe2/python/recurrent.py, test/test_fx.py).
- Should read `multiplied` rather than `multuplied` (test/distributed/test_c10d_nccl.py).
- Should read `eliminate` rather than `elminate` (torch/csrc/jit/runtime/autodiff.cpp).
- Should read `dispatcher` rather than `disaptcher` (torchgen/gen.py).
Semi-automated pull request generated by
https://github.com/timgates42/meticulous/blob/master/docs/NOTE.md
Pull Request resolved: https://github.com/pytorch/pytorch/pull/81435
Approved by: https://github.com/ngimel
diff --git a/caffe2/python/recurrent.py b/caffe2/python/recurrent.py
index d4762f0..8bb0d9c 100644
--- a/caffe2/python/recurrent.py
+++ b/caffe2/python/recurrent.py
@@ -282,7 +282,7 @@
cell_net.Proto().type = 'simple'
# The last output is a list of step workspaces,
- # which is only needed internally for gradient propogation
+ # which is only needed internally for gradient propagation
return results[:-1]
diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py
index b687650..f413901 100644
--- a/test/distributed/test_c10d_nccl.py
+++ b/test/distributed/test_c10d_nccl.py
@@ -2183,7 +2183,7 @@
process_group, allreduce_with_then_hook
)
- # check whether the grads are equal to what allreduce returns multuplied by 5.
+ # check whether the grads are equal to what allreduce returns multiplied by 5.
# without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
diff --git a/test/test_fx.py b/test/test_fx.py
index 8fb7c76..3e3b12f 100644
--- a/test/test_fx.py
+++ b/test/test_fx.py
@@ -1550,7 +1550,7 @@
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
- # Test shape propogation and make sure results match actual
+ # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
diff --git a/torch/csrc/jit/runtime/autodiff.cpp b/torch/csrc/jit/runtime/autodiff.cpp
index a8dc3cc..7e1f618 100644
--- a/torch/csrc/jit/runtime/autodiff.cpp
+++ b/torch/csrc/jit/runtime/autodiff.cpp
@@ -389,7 +389,7 @@
static ReverseDetails addReverseInline(Gradient& grad_desc) {
auto& graph = *grad_desc.f;
// note: reverse_node is intentionally not inserted to avoid
- // accidentally acting on it (e.g. in elminate dead code),
+ // accidentally acting on it (e.g. in eliminate dead code),
// std::cout << *reverse_node << to view its state.
auto reverse_node = graph.create(prim::Reverse, 0);
auto reverse_block = reverse_node->addBlock();
diff --git a/torchgen/gen.py b/torchgen/gen.py
index 2ff7773..807fa09 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -103,7 +103,7 @@
# - 'api' has conversions for how to translate JIT schema into
# the various C++ APIs that the codegen interacts with. There
# are in fact THREE different C++ APIs: the public C++ API,
-# the dispatcher API, and the legacy disaptcher API. See each
+# the dispatcher API, and the legacy dispatcher API. See each
# of these respective files for more information
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #