Empty sparse tensor copy reverses dimI, dimV. (#5414)
diff --git a/aten/src/THCS/generic/THCSTensor.cpp b/aten/src/THCS/generic/THCSTensor.cpp
index da97263..eaa3118 100644
--- a/aten/src/THCS/generic/THCSTensor.cpp
+++ b/aten/src/THCS/generic/THCSTensor.cpp
@@ -149,8 +149,8 @@
// TODO: we may need to special case when only one of these are empty.
if (THCudaLongTensor_nDimension(state, indices) == 0 && THCTensor_(nDimension)(state, values) == 0
&& sizes != NULL) {
- nDimI = 0;
- nDimV = THLongStorage_size(sizes);
+ nDimI = THLongStorage_size(sizes);
+ nDimV = 0;
} else {
nDimI = THCIndexTensor_(size)(state, indices, 0);
nDimV = THCTensor_(nDimension)(state, values) - 1;
diff --git a/aten/src/THS/generic/THSTensor.cpp b/aten/src/THS/generic/THSTensor.cpp
index e3ee532..b0bb02e 100644
--- a/aten/src/THS/generic/THSTensor.cpp
+++ b/aten/src/THS/generic/THSTensor.cpp
@@ -148,8 +148,8 @@
// TODO: we may need to special case when only one of these are empty.
if (THLongTensor_nDimension(indices) == 0 && THTensor_(nDimension)(values) == 0 && sizes != NULL) {
- nDimI = 0;
- nDimV = THLongStorage_size(sizes);
+ nDimI = THLongStorage_size(sizes);
+ nDimV = 0;
} else {
nDimI = THLongTensor_size(indices, 0);
nDimV = THTensor_(nDimension)(values) - 1;
diff --git a/test/test_sparse.py b/test/test_sparse.py
index 01e006a..fe7c177 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -344,7 +344,11 @@
from torch.autograd import Variable
x = Variable(torch.sparse.FloatTensor(2, 3, 4))
y = x.cuda(0)
- x.cpu()
+ self.assertEqual(x._dimI(), y._dimI())
+ self.assertEqual(x._dimV(), y._dimV())
+ x = y.cpu()
+ self.assertEqual(y._dimI(), x._dimI())
+ self.assertEqual(y._dimV(), x._dimV())
def test_transpose(self):
x = self._gen_sparse(4, 20, 5)[0]