Rename set_names -> view_names, set_names_ -> names_ (#23962)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/23962

This change should make the semantics clearer.

`tensor.names_(names)` renames `tensor` in place, setting `tensor.names` to `names`.

`tensor.view_names(names)` returns a view of `tensor` whose dimensions are named `names`; the original tensor's names are left unchanged.
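
A minimal usage sketch, mirroring the updated tests (assumes a build with
BUILD_NAMEDTENSOR enabled):

    import torch

    t = torch.empty(1, 1, names=('N', 'C'))

    # view_names returns a renamed view; the original's names are untouched.
    v = t.view_names(['H', 'W'])
    assert v.names == ('H', 'W')
    assert t.names == ('N', 'C')

    # names_ renames in place and returns self.
    t.names_(['H', 'W'])
    assert t.names == ('H', 'W')

As part of the rename, the `view_names` schema annotation changes from
`Tensor(a!)` to `Tensor(a)`, since it returns an aliasing view rather than
mutating `self`.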

Test Plan: Imported from OSS
- [namedtensor ci]

Differential Revision: D16710915

Pulled By: zou3519

fbshipit-source-id: c82fa9812624d03c86f7be84b0a460e3c047aaa0
diff --git a/aten/src/ATen/core/Tensor.h b/aten/src/ATen/core/Tensor.h
index 8069ca5..fa0366b 100644
--- a/aten/src/ATen/core/Tensor.h
+++ b/aten/src/ATen/core/Tensor.h
@@ -360,10 +360,10 @@
   void backward(const Tensor & gradient={}, bool keep_graph=false, bool create_graph=false) const;
   void set_data(const Tensor & new_data) const;
   #ifdef BUILD_NAMEDTENSOR
-  Tensor & set_names_(c10::optional<DimnameList> names) const;
+  Tensor & names_(c10::optional<DimnameList> names) const;
   #endif
   #ifdef BUILD_NAMEDTENSOR
-  Tensor set_names(c10::optional<DimnameList> names) const;
+  Tensor view_names(c10::optional<DimnameList> names) const;
   #endif
   #ifdef BUILD_NAMEDTENSOR
   Tensor align_to(DimnameList names) const;
diff --git a/aten/src/ATen/core/TensorMethods.h b/aten/src/ATen/core/TensorMethods.h
index 261bb14..ced92a6 100644
--- a/aten/src/ATen/core/TensorMethods.h
+++ b/aten/src/ATen/core/TensorMethods.h
@@ -66,14 +66,14 @@
     return table->getOp<void (const Tensor &, const Tensor &)>(tensorTypeIdToBackend(type_id()), is_variable())(const_cast<Tensor&>(*this), new_data);
 }
 #ifdef BUILD_NAMEDTENSOR
-inline Tensor & Tensor::set_names_(c10::optional<DimnameList> names) const {
-    static auto table = globalATenDispatch().getOpTable("aten::set_names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)");
+inline Tensor & Tensor::names_(c10::optional<DimnameList> names) const {
+    static auto table = globalATenDispatch().getOpTable("aten::names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)");
     return table->getOp<Tensor & (Tensor &, c10::optional<DimnameList>)>(tensorTypeIdToBackend(type_id()), is_variable())(const_cast<Tensor&>(*this), names);
 }
 #endif
 #ifdef BUILD_NAMEDTENSOR
-inline Tensor Tensor::set_names(c10::optional<DimnameList> names) const {
-    static auto table = globalATenDispatch().getOpTable("aten::set_names(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)");
+inline Tensor Tensor::view_names(c10::optional<DimnameList> names) const {
+    static auto table = globalATenDispatch().getOpTable("aten::view_names(Tensor(a) self, Dimname[]? names) -> Tensor(a)");
     return table->getOp<Tensor (const Tensor &, c10::optional<DimnameList>)>(tensorTypeIdToBackend(type_id()), is_variable())(const_cast<Tensor&>(*this), names);
 }
 #endif
diff --git a/aten/src/ATen/native/NamedTensor.cpp b/aten/src/ATen/native/NamedTensor.cpp
index 8d1327b..5d7f5e4 100644
--- a/aten/src/ATen/native/NamedTensor.cpp
+++ b/aten/src/ATen/native/NamedTensor.cpp
@@ -6,11 +6,11 @@
 
 namespace at { namespace native {
 
-Tensor& set_names_(Tensor& self, optional<DimnameList> names) {
+Tensor& names_(Tensor& self, optional<DimnameList> names) {
   return at::internal_set_names_inplace(self, names);
 }
 
-Tensor set_names(const Tensor& self, optional<DimnameList> names) {
+Tensor view_names(const Tensor& self, optional<DimnameList> names) {
   auto result = self.alias();
   at::internal_set_names_inplace(result, names);
   return result;
@@ -114,7 +114,7 @@
         names,
         is_aligning_two_tensors);
   }
-  auto result = tensor.set_names(nullopt).view(expanded_sizes);
+  auto result = tensor.view_names(nullopt).view(expanded_sizes);
   at::internal_set_names_inplace(result, names);
   return result;
 }
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index a96f04a..d636280 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -35,11 +35,11 @@
 - func: set_data(Tensor(a!) self, Tensor new_data) -> void
   variants: method
 
-- func: set_names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
+- func: names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
   variants: method
   named_guard: False
 
-- func: set_names(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
+- func: view_names(Tensor(a) self, Dimname[]? names) -> Tensor(a)
   variants: method
   named_guard: False
 
diff --git a/test/test_namedtensor.py b/test/test_namedtensor.py
index 82db8c2..3b62846 100644
--- a/test/test_namedtensor.py
+++ b/test/test_namedtensor.py
@@ -31,8 +31,8 @@
     # Right now I don't know what it should look like.
     def assertTensorDataAndNamesEqual(self, x, y):
         self.assertEqual(x.names, y.names)
-        unnamed_x = x.set_names(None)
-        unnamed_y = y.set_names(None)
+        unnamed_x = x.view_names(None)
+        unnamed_y = y.view_names(None)
         self.assertEqual(unnamed_x, unnamed_y)
 
     def _test_factory(self, factory, device):
@@ -89,7 +89,7 @@
         self.assertTrue(fully_named.has_names())
 
     def test_repr(self):
-        named_tensor = torch.zeros(2, 3).set_names_(['N', 'C'])
+        named_tensor = torch.zeros(2, 3).names_(['N', 'C'])
         expected = "tensor([[0., 0., 0.],\n        [0., 0., 0.]], names=('N', 'C'))"
         self.assertEqual(repr(named_tensor), expected)
 
@@ -97,20 +97,20 @@
         expected = "tensor([[0., 0., 0.],\n        [0., 0., 0.]])"
         self.assertEqual(repr(unnamed_tensor), expected)
 
-        none_named_tensor = torch.zeros(2, 3).set_names_([None, None])
+        none_named_tensor = torch.zeros(2, 3).names_([None, None])
         self.assertEqual(repr(none_named_tensor), expected)
 
     def test_noncontig_contiguous(self):
         # This type of contiguous is special-cased and therefore needs its own test
         for device in torch.testing.get_all_device_types():
-            x = torch.randn(2, 3, device=device).t().set_names_(('N', 'C'))
+            x = torch.randn(2, 3, device=device).t().names_(('N', 'C'))
             self.assertEqual(x.contiguous().names, ('N', 'C'))
 
     def test_copy_transpose(self):
         # This type of copy is special-cased and therefore needs its own test
         def _test(self_names, other_names, expected_names):
             x = torch.empty(2, 5, names=self_names)
-            y = torch.empty(5, 2).t().set_names_(other_names)
+            y = torch.empty(5, 2).t().names_(other_names)
             x.copy_(y)
             self.assertEqual(x.names, expected_names)
 
@@ -118,28 +118,28 @@
         _test(('N', None), ('N', 'C'), ('N', 'C'))
         _test(None, ('N', 'C'), ('N', 'C'))
 
-    def test_set_names_(self):
+    def test_names_(self):
         tensor = torch.empty(1, 1, names=('N', 'C'))
-        self.assertEqual(tensor.set_names_(None).names, (None, None))
-        self.assertEqual(tensor.set_names_(['H', 'W']).names, ('H', 'W'))
+        self.assertEqual(tensor.names_(None).names, (None, None))
+        self.assertEqual(tensor.names_(['H', 'W']).names, ('H', 'W'))
         with self.assertRaisesRegex(RuntimeError, 'Number of names'):
-            tensor.set_names_(['N', 'C', 'W'])
+            tensor.names_(['N', 'C', 'W'])
         with self.assertRaisesRegex(RuntimeError, 'duplicate names'):
-            tensor.set_names_(['N', 'N'])
+            tensor.names_(['N', 'N'])
 
-    def test_set_names(self):
+    def test_view_names(self):
         tensor = torch.empty(1, 1, names=('N', 'C'))
 
-        self.assertEqual(tensor.set_names(None).names, (None, None))
-        self.assertEqual(tensor.set_names(['H', 'W']).names, ('H', 'W'))
+        self.assertEqual(tensor.view_names(None).names, (None, None))
+        self.assertEqual(tensor.view_names(['H', 'W']).names, ('H', 'W'))
 
         # Check that we didn't modify tensor.names
         self.assertEqual(tensor.names, ('N', 'C'))
 
         with self.assertRaisesRegex(RuntimeError, 'Number of names'):
-            tensor.set_names(['N', 'C', 'W'])
+            tensor.view_names(['N', 'C', 'W'])
         with self.assertRaisesRegex(RuntimeError, 'duplicate names'):
-            tensor.set_names(['N', 'N'])
+            tensor.view_names(['N', 'N'])
 
     def test_set_names_property(self):
         tensor = torch.empty(1, 1, names=('N', 'C'))
@@ -167,7 +167,7 @@
             result = factory(1, 2, 3, names=names, device=device)
 
             torch.manual_seed(0)
-            expected = factory(1, 2, 3, device=device).set_names_(names)
+            expected = factory(1, 2, 3, device=device).names_(names)
 
             self.assertTensorDataAndNamesEqual(result, expected)
 
@@ -185,7 +185,7 @@
         for device in torch.testing.get_all_device_types():
             names = ('N', 'T', 'D')
             result = torch.full([1, 2, 3], 2, names=names, device=device)
-            expected = torch.full([1, 2, 3], 2, device=device).set_names_(names)
+            expected = torch.full([1, 2, 3], 2, device=device).names_(names)
             self.assertTensorDataAndNamesEqual(result, expected)
 
     def test_size(self):
diff --git a/test/test_torch.py b/test/test_torch.py
index 566dadb..a1fc1d1 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -226,8 +226,8 @@
                        'sparse_resize_',
                        'sparse_resize_and_clear_',
                        'align_to',  # BUILD_NAMEDTENSOR only
-                       'set_names',  # BUILD_NAMEDTENSOR only
-                       'set_names_',  # BUILD_NAMEDTENSOR only
+                       'view_names',  # BUILD_NAMEDTENSOR only
+                       'names_',  # BUILD_NAMEDTENSOR only
                        'has_names',  # BUILD_NAMEDTENSOR only
                        )
         test_namespace(torch.nn)