add `to` method for PackedSequence (#7319)
* ENH: add to method for PackedSequence
* ENH: return self if possible
* TST: remove extra data
* DOC: add more explanation
* TST: remove extra data
* DOC: minor fix
diff --git a/test/test_nn.py b/test/test_nn.py
index 6bceb5a..f241cb8 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -161,6 +161,24 @@
ref_output = torch.cat([no_extra_pad, extra_pad], 0)
self.assertEqual(unpacked, ref_output)
+ def test_to(self):
+ padded, lengths = self._padded_sequence(torch.IntTensor)
+ a = rnn_utils.pack_padded_sequence(padded, lengths).cpu()
+
+ self.assertIs(a, a.to('cpu'))
+ self.assertIs(a, a.to('cpu', dtype=torch.int32))
+ self.assertEqual(a.long(), a.to(torch.int64))
+
+ if torch.cuda.is_available():
+ for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
+ b = a.cuda(device=cuda)
+ self.assertIs(b, b.to(cuda))
+ self.assertEqual(a, b.to('cpu'))
+ self.assertEqual(b, a.to(cuda))
+ self.assertEqual(a, b.to('cpu', dtype=torch.int32))
+ self.assertIs(b, b.to(dtype=torch.int32))
+ self.assertEqual(b.long(), b.to(dtype=torch.int64))
+
def default_tensor_type(type):
type_str = torch.typename(type)
diff --git a/torch/nn/utils/rnn.py b/torch/nn/utils/rnn.py
index 0a303f0..a6adfe9 100644
--- a/torch/nn/utils/rnn.py
+++ b/torch/nn/utils/rnn.py
@@ -85,6 +85,23 @@
r"""Returns copy with `self.data` cast to byte type"""
return type(self)(self.data.byte(), self.batch_sizes)
+ def to(self, *args, **kwargs):
+ r"""Performs dtype and/or device conversion on `self.data`.
+
+ It has a similar signature to :meth:`torch.Tensor.to`.
+
+ .. note::
+
+ If the ``self.data`` Tensor already has the correct :class:`torch.dtype`
+ and :class:`torch.device`, then ``self`` is returned.
+ Otherwise, returns a copy with the desired configuration.
+ """
+ data = self.data.to(*args, **kwargs)
+ if data is self.data:
+ return self
+ else:
+ return type(self)(data, self.batch_sizes)
+
@property
def is_cuda(self):
r"""Returns true if `self.data` stored on a gpu"""