Revert D16647820: Add `names` argument to ones, rand, randn, zeros, full

Differential Revision: D16647820

Original commit changeset: c6c53c5f26a8

fbshipit-source-id: a341c6eda49f5dd2e1712b65e61fef99791f0668
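
For reference, the reverted commit exposed a `names` keyword on these factory
functions. `torch.empty` keeps its `names` argument (empty.names stays in
native_functions.yaml), and the `set_names_` path used by the removed tests is
untouched. A minimal usage sketch, based on the removed test_factory_coverage
test rather than on this revert itself:

    import torch

    names = ('N', 'T', 'D')

    # Removed by this revert (the keyword is no longer accepted on ones,
    # rand, randn, zeros, full):
    #   t = torch.ones(1, 2, 3, names=names)

    # Still available: name the dimensions after construction, as the
    # removed test's "expected" path did.
    t = torch.ones(1, 2, 3).set_names_(names)

    # torch.empty keeps the keyword (see the new test_empty test below):
    e = torch.empty(2, 3, names=('N', 'C'))
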
diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp
index f754c38..7960bc1 100644
--- a/aten/src/ATen/native/TensorFactories.cpp
+++ b/aten/src/ATen/native/TensorFactories.cpp
@@ -122,9 +122,6 @@
IntArrayRef size,
at::optional<DimnameList> names,
const TensorOptions& options) {
- if (!names.has_value()) {
- return at::empty(size, options);
- }
TORCH_CHECK(options.layout() == Layout::Strided,
"NYI: named tensors only support strided layout");
TORCH_CHECK(options.backend() == Backend::CPU || options.backend() == Backend::CUDA,
@@ -837,68 +834,5 @@
return self;
}
-#ifdef BUILD_NAMEDTENSOR
-// ~~~~~~~~~~~~~~~~~~~~~~~~~ named tensor overloads ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-// In the short term, these exist.
-// In the long term, we should move DimnameList into TensorOptions to avoid
-// having these overloads.
-
-Tensor full(
- IntArrayRef size,
- Scalar fill_value,
- optional<DimnameList> names,
- const TensorOptions& options) {
- auto result = at::empty(size, names, options);
- return result.fill_(fill_value);
-}
-
-Tensor ones(
- IntArrayRef size,
- optional<DimnameList> names,
- const TensorOptions& options) {
- return native::full(size, /*fill_value=*/1, names, options);
-}
-
-Tensor zeros(
- IntArrayRef size,
- optional<DimnameList> names,
- const TensorOptions& options) {
- return native::full(size, /*fill_value=*/0, names, options);
-}
-
-Tensor randn(
- IntArrayRef size,
- optional<DimnameList> names,
- const TensorOptions& options) {
- return native::randn(size, nullptr, names, options);
-}
-
-Tensor randn(
- IntArrayRef size,
- Generator* generator,
- optional<DimnameList> names,
- const TensorOptions& options) {
- auto result = at::empty(size, names, options);
- return result.normal_(0, 1, generator);
-}
-
-Tensor rand(
- IntArrayRef size,
- optional<DimnameList> names,
- const TensorOptions& options) {
- return native::rand(size, nullptr, names, options);
-}
-
-Tensor rand(
- IntArrayRef size,
- Generator* generator,
- optional<DimnameList> names,
- const TensorOptions& options) {
- auto result = at::empty(size, names, options);
- return result.uniform_(0, 1, generator);
-}
-
-#endif
-
} // namespace native
} // namespace at
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index ba62217..ce5a97b 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -744,7 +744,6 @@
CUDA: _embedding_bag_per_sample_weights_backward_cuda
- func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
- func: empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
dispatch:
@@ -914,9 +913,6 @@
CPU: _frac_out_cpu
CUDA: _frac_out_cuda
-- func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
- func: full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
@@ -1531,9 +1527,6 @@
- func: _nnpack_spatial_convolution_backward_weight(Tensor input, int[] weightsize, Tensor grad_output, int[2] padding) -> Tensor
variants: function
-- func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
- func: ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
@@ -1587,12 +1580,6 @@
- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-- func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
-- func: rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
- func: rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -1633,12 +1620,6 @@
- func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-- func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
-- func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
- func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
- func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
@@ -2324,9 +2305,6 @@
- func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
variants: function
-- func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- device_guard: False
-
- func: zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
diff --git a/test/test_namedtensor.py b/test/test_namedtensor.py
index c10e328..23f475c 100644
--- a/test/test_namedtensor.py
+++ b/test/test_namedtensor.py
@@ -76,6 +76,9 @@
x = factory(2, 1, 1, names=('C.in', 'H', 'C'), device=device)
+ def test_empty(self):
+ self._test_factory(torch.empty, 'cpu')
+
def test_has_names(self):
unnamed = torch.empty(2, 3)
none_named = torch.empty(2, 3, names=(None, None))
@@ -148,38 +151,9 @@
with self.assertRaisesRegex(RuntimeError, 'duplicate names'):
tensor.names = ['N', 'N']
- def test_factory_edge_cases(self):
- for device in torch.testing.get_all_device_types():
- self._test_factory(torch.empty, device)
-
- def test_factory_coverage(self):
- def _test(factory, device):
- names = ('N', 'T', 'D')
-
- torch.manual_seed(0)
- result = factory(1, 2, 3, names=names, device=device)
-
- torch.manual_seed(0)
- expected = factory(1, 2, 3, device=device).set_names_(names)
-
- self.assertTensorDataAndNamesEqual(result, expected)
-
- supported = [
- torch.ones,
- torch.rand,
- torch.randn,
- torch.zeros,
- ]
-
- for op, device in itertools.product(supported, torch.testing.get_all_device_types()):
- _test(op, device)
-
- # Test torch.full
- for device in torch.testing.get_all_device_types():
- names = ('N', 'T', 'D')
- result = torch.full([1, 2, 3], 2, names=names, device=device)
- expected = torch.full([1, 2, 3], 2, device=device).set_names_(names)
- self.assertTensorDataAndNamesEqual(result, expected)
+ @unittest.skipIf(not TEST_CUDA, 'no CUDA')
+ def test_empty_cuda(self):
+ self._test_factory(torch.empty, 'cuda')
def test_size(self):
t = torch.empty(2, 3, 5, names=('N', None, 'C'))