| commit | 7b7f35704274e32920d1a201c929d60364f076f8 | |
|---|---|---|
| author | Joel Schlosser <jbschlosser@meta.com> | Mon Jun 24 16:08:39 2024 -0400 |
| committer | PyTorch MergeBot <pytorchmergebot@users.noreply.github.com> | Mon Jun 24 22:32:01 2024 +0000 |
| tree | 5adc2fe19f162f7ecd7cf96a98a8b7ef491426fc | |
| parent | 5f912f480c05ec0f7a2511fb8699adc8ba858b55 | |
Fix DEBUG=1 asserts with NJT ops (#129014)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129014
Approved by: https://github.com/YuqingJ, https://github.com/soulitzer
diff --git a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp
index eff2a27..d4bcf48 100644
--- a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp
+++ b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp
@@ -379,8 +379,8 @@
       [&](size_t idx_tensor, size_t idx_ret, const at::Tensor& t) {
         if (at::impl::tensor_has_dispatch(t) ||
             at::impl::dispatch_mode_enabled() ||
-            // NJT offsets are expected to be reused; skip use_count() check
-            op_name == "aten::_nested_get_offsets")
+            // NJT components are expected to be reused; skip use_count() check
+            op_name.rfind("aten::_nested_get", 0) == 0)
           return;
         if (!is_inplace_output[idx_ret])
           TORCH_INTERNAL_ASSERT(
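For context on the change: `op_name.rfind("aten::_nested_get", 0) == 0` is the common pre-C++20 idiom for a string prefix test, so the exemption now covers any op whose name starts with `aten::_nested_get` rather than only `aten::_nested_get_offsets`. The sketch below is not part of the patch; it only illustrates the idiom, and the op names in `main` are examples chosen for illustration.

```cpp
#include <cassert>
#include <string>

// Sketch only: mirrors the prefix check used in the hunk above.
// std::string::rfind(prefix, 0) can only match at position 0, so comparing
// the result to 0 is equivalent to a starts_with(prefix) test.
bool is_nested_get_op(const std::string& op_name) {
  return op_name.rfind("aten::_nested_get", 0) == 0;
}

int main() {
  // Old behavior exempted only the offsets getter from the use_count() check.
  assert(is_nested_get_op("aten::_nested_get_offsets"));
  // With the prefix check, other NJT component getters are exempted too
  // (example op name for illustration).
  assert(is_nested_get_op("aten::_nested_get_values"));
  // Unrelated ops are still subject to the DEBUG=1 assert.
  assert(!is_nested_get_op("aten::add"));
  return 0;
}
```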