Explicitly provide memory format when calling clone() in Sorting.cpp
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/28663
Test Plan: Imported from OSS
Differential Revision: D18333373
Pulled By: ifedan
fbshipit-source-id: 908880dd58d5e795db661a7249a11028f610c328
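
For illustration only, a minimal standalone sketch (not part of this commit) of the difference the explicit memory format makes, assuming the libtorch C++ API in which Tensor::clone accepts an optional at::MemoryFormat argument:

    // Hypothetical standalone example, not taken from the PyTorch sources.
    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // transpose() produces a non-contiguous view without copying data.
      at::Tensor t = at::rand({3, 4}).transpose(0, 1);

      // MemoryFormat::Preserve keeps the source's (non-contiguous) strides.
      at::Tensor preserved = t.clone(at::MemoryFormat::Preserve);

      // MemoryFormat::Contiguous forces a contiguous copy; the sorting code
      // below relies on this, e.g. so that .view(-1) on the clone is valid.
      at::Tensor contiguous = t.clone(at::MemoryFormat::Contiguous);

      std::cout << preserved.is_contiguous() << " "    // 0
                << contiguous.is_contiguous() << "\n"; // 1
      return 0;
    }

Passing the format explicitly at each call site keeps the result contiguous for non-contiguous inputs regardless of what default clone() uses.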
diff --git a/aten/src/ATen/native/Sorting.cpp b/aten/src/ATen/native/Sorting.cpp
index 53b2cfe..e2e4f35 100644
--- a/aten/src/ATen/native/Sorting.cpp
+++ b/aten/src/ATen/native/Sorting.cpp
@@ -120,7 +120,7 @@
indices.zero_();
return std::forward_as_tuple(values, indices);
}
- auto tmp_values = self.clone();
+ auto tmp_values = self.clone(at::MemoryFormat::Contiguous);
auto tmp_indices = at::empty(self.sizes(), self.options().dtype(kLong));
AT_DISPATCH_ALL_TYPES(self.scalar_type(), "kthvalue_cpu", [&] {
dim_apply(
@@ -290,9 +290,9 @@
#endif
TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor");
if (self.dim() == 0 && self.numel() == 1) {
- return self.clone();
+ return self.clone(at::MemoryFormat::Contiguous);
}
- auto tmp_values = self.clone().view(-1);
+ auto tmp_values = self.clone(at::MemoryFormat::Contiguous).view(-1);
auto result = at::empty({1}, self.options());
AT_DISPATCH_ALL_TYPES(self.scalar_type(), "median", [&] {
// note, quick_select is 0 based while kthvalue is not