Rename tensor.is_named to has_names, expose has_names to python.

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/23315

Test Plan:
- [namedtensor ci]

gh-metadata: pytorch pytorch 23315 gh/zou3519/79/head

Imported from OSS

Differential Revision: D16494414

Pulled By: zou3519

fbshipit-source-id: d2d6beb45db9288e5df707b68b6046d783ca9f97
diff --git a/aten/src/ATen/NamedTensor.cpp b/aten/src/ATen/NamedTensor.cpp
index 327c3c9..80fe987 100644
--- a/aten/src/ATen/NamedTensor.cpp
+++ b/aten/src/ATen/NamedTensor.cpp
@@ -96,7 +96,7 @@
   }
 }
 
-bool internal_is_named(TensorImpl* impl) {
+bool internal_has_names(TensorImpl* impl) {
   const auto* named_tensor_meta = get_named_tensor_meta(impl);
   return named_tensor_meta != nullptr && named_tensor_meta->has_names();
 }
diff --git a/aten/src/ATen/NamedTensor.h b/aten/src/ATen/NamedTensor.h
index 88946c2..59ca1c4 100644
--- a/aten/src/ATen/NamedTensor.h
+++ b/aten/src/ATen/NamedTensor.h
@@ -54,7 +54,7 @@
 CAFFE2_API void internal_set_names_inplace(TensorImpl* impl, optional<DimnameList> names);
 CAFFE2_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);
 CAFFE2_API optional<DimnameList> internal_get_names(TensorImpl* impl);
-CAFFE2_API bool internal_is_named(TensorImpl* impl);
+CAFFE2_API bool internal_has_names(TensorImpl* impl);
 
 
 } // namespace impl
diff --git a/aten/src/ATen/NamedTensorUtils.h b/aten/src/ATen/NamedTensorUtils.h
index 0f56cbc..d9f5f4c 100644
--- a/aten/src/ATen/NamedTensorUtils.h
+++ b/aten/src/ATen/NamedTensorUtils.h
@@ -12,7 +12,7 @@
 
 inline bool has_names(TensorList tensors) {
   return std::any_of(
-      tensors.begin(), tensors.end(), [](const Tensor& t) { return t.is_named(); });
+      tensors.begin(), tensors.end(), [](const Tensor& t) { return t.has_names(); });
 }
 
 // Sets the names of `tensor` to be `names`.
diff --git a/aten/src/ATen/core/Tensor.h b/aten/src/ATen/core/Tensor.h
index 310babc..c9701c9 100644
--- a/aten/src/ATen/core/Tensor.h
+++ b/aten/src/ATen/core/Tensor.h
@@ -262,7 +262,7 @@
 
 #ifdef BUILD_NAMEDTENSOR
   /// Returns if a `Tensor` has any dimension names
-  bool is_named() const;
+  bool has_names() const;
 
   /// Returns a `Tensor`'s dimension names data structure
   const NamedTensorMeta* get_named_tensor_meta() const;
diff --git a/aten/src/ATen/core/TensorMethods.h b/aten/src/ATen/core/TensorMethods.h
index a4a078f..c4e7dd2 100644
--- a/aten/src/ATen/core/TensorMethods.h
+++ b/aten/src/ATen/core/TensorMethods.h
@@ -1765,8 +1765,8 @@
   return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
 }
 
-inline bool Tensor::is_named() const {
-  return impl::internal_is_named(unsafeGetTensorImpl());
+inline bool Tensor::has_names() const {
+  return impl::internal_has_names(unsafeGetTensorImpl());
 }
 #endif
 
diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py
index 432a6ea..56d58f0 100644
--- a/aten/src/ATen/function_wrapper.py
+++ b/aten/src/ATen/function_wrapper.py
@@ -602,7 +602,7 @@
         return ''
     named_conditions = []
     for tensor in tensors:
-        named_conditions.append('{}.is_named()'.format(tensor))
+        named_conditions.append('{}.has_names()'.format(tensor))
     for tensorlist in tensorlists:
         named_conditions.append('at::has_names({})'.format(tensorlist))
     return ("""\
diff --git a/aten/src/ATen/native/TensorIterator.cpp b/aten/src/ATen/native/TensorIterator.cpp
index 71be1e7..e20ddb9 100644
--- a/aten/src/ATen/native/TensorIterator.cpp
+++ b/aten/src/ATen/native/TensorIterator.cpp
@@ -223,7 +223,7 @@
     // don't include output tensors that are not also input tensors.
     if (resize_outputs_ && op.is_output && !op.is_read_write) continue;
     // perform name inference
-    if (!op.tensor.is_named()) {
+    if (!op.tensor.has_names()) {
       continue;
     }
     auto tensor_names = *op.tensor.names();
diff --git a/aten/src/ATen/templates/Tensor.h b/aten/src/ATen/templates/Tensor.h
index deba430..6175a22 100644
--- a/aten/src/ATen/templates/Tensor.h
+++ b/aten/src/ATen/templates/Tensor.h
@@ -262,7 +262,7 @@
 
 #ifdef BUILD_NAMEDTENSOR
   /// Returns if a `Tensor` has any dimension names
-  bool is_named() const;
+  bool has_names() const;
 
   /// Returns a `Tensor`'s dimension names data structure
   const NamedTensorMeta* get_named_tensor_meta() const;
diff --git a/aten/src/ATen/templates/TensorMethods.h b/aten/src/ATen/templates/TensorMethods.h
index 8da9257..1d4b1ac 100644
--- a/aten/src/ATen/templates/TensorMethods.h
+++ b/aten/src/ATen/templates/TensorMethods.h
@@ -98,8 +98,8 @@
   return static_cast<NamedTensorMeta*>(impl_->named_tensor_meta());
 }
 
-inline bool Tensor::is_named() const {
-  return impl::internal_is_named(unsafeGetTensorImpl());
+inline bool Tensor::has_names() const {
+  return impl::internal_has_names(unsafeGetTensorImpl());
 }
 #endif
 
diff --git a/aten/src/ATen/test/NamedTensor_test.cpp b/aten/src/ATen/test/NamedTensor_test.cpp
index c448135..a0b3e04 100644
--- a/aten/src/ATen/test/NamedTensor_test.cpp
+++ b/aten/src/ATen/test/NamedTensor_test.cpp
@@ -26,12 +26,12 @@
 
 TEST(NamedTensorTest, isNamed) {
   auto tensor = at::zeros({3, 2, 5, 7});
-  ASSERT_FALSE(tensor.is_named());
+  ASSERT_FALSE(tensor.has_names());
 
   tensor = at::zeros({3, 2, 5, 7});
   tensor.unsafeGetTensorImpl()->set_named_tensor_meta(
       make_unique<NamedTensorMeta>(tensor.dim()));
-  ASSERT_FALSE(tensor.is_named());
+  ASSERT_FALSE(tensor.has_names());
 
   tensor = at::zeros({3, 2, 5, 7});
   auto N = dimnameFromString("N");
@@ -41,7 +41,7 @@
   std::vector<Dimname> names = { N, C, H, W };
   tensor.unsafeGetTensorImpl()->set_named_tensor_meta(
       make_unique<NamedTensorMeta>(names));
-  ASSERT_TRUE(tensor.is_named());
+  ASSERT_TRUE(tensor.has_names());
 }
 
 static bool dimnames_equal(at::DimnameList names, at::DimnameList other) {
@@ -74,7 +74,7 @@
 
   // Test dropping metadata
   tensor.unsafeGetTensorImpl()->set_named_tensor_meta(nullptr);
-  ASSERT_FALSE(tensor.is_named());
+  ASSERT_FALSE(tensor.has_names());
 }
 
 TEST(NamedTensorTest, internalSetNamesInplace) {
@@ -84,7 +84,7 @@
   auto H = dimnameFromString("H");
   auto W = dimnameFromString("W");
   std::vector<Dimname> names = { N, C, H, W };
-  ASSERT_FALSE(tensor.is_named());
+  ASSERT_FALSE(tensor.has_names());
 
   // Set names
   at::internal_set_names_inplace(tensor, names);
diff --git a/test/test_namedtensor.py b/test/test_namedtensor.py
index 6b8589b..1e0b105 100644
--- a/test/test_namedtensor.py
+++ b/test/test_namedtensor.py
@@ -74,6 +74,17 @@
     def test_empty(self):
         self._test_factory(torch.empty, 'cpu')
 
+    def test_has_names(self):
+        unnamed = torch.empty(2, 3)
+        none_named = torch.empty(2, 3, names=(None, None))
+        partially_named = torch.empty(2, 3, names=('N', None))
+        fully_named = torch.empty(2, 3, names=('N', 'C'))
+
+        self.assertFalse(unnamed.has_names())
+        self.assertFalse(none_named.has_names())
+        self.assertTrue(partially_named.has_names())
+        self.assertTrue(fully_named.has_names())
+
     def test_copy_transpose(self):
         # This type of copy is special-cased and therefore needs its own test
         def _test(self_names, other_names, expected_names):
diff --git a/test/test_torch.py b/test/test_torch.py
index d91eff0..43422dd 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -226,6 +226,7 @@
                        'sparse_resize_',
                        'sparse_resize_and_clear_',
                        'set_names_',  # BUILD_NAMEDTENSOR only
+                       'has_names',  # BUILD_NAMEDTENSOR only
                        )
         test_namespace(torch.nn)
         test_namespace(torch.nn.functional, 'assert_int_or_pair', 'bilinear', 'feature_alpha_dropout')
diff --git a/tools/autograd/templates/python_variable_methods.cpp b/tools/autograd/templates/python_variable_methods.cpp
index fda0886..378593e 100644
--- a/tools/autograd/templates/python_variable_methods.cpp
+++ b/tools/autograd/templates/python_variable_methods.cpp
@@ -146,6 +146,16 @@
   END_HANDLE_TH_ERRORS
 }
 
+#ifdef BUILD_NAMEDTENSOR
+static PyObject * THPVariable_has_names(PyObject* self_, PyObject* args)
+{
+  HANDLE_TH_ERRORS
+  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
+  return wrap(self.has_names());
+  END_HANDLE_TH_ERRORS
+}
+#endif
+
 static PyObject * THPVariable_data_ptr(PyObject* self_, PyObject* args)
 {
   HANDLE_TH_ERRORS
@@ -763,6 +773,9 @@
   {"cuda", (PyCFunction)THPVariable_cuda, METH_VARARGS | METH_KEYWORDS, NULL},
   {"data_ptr", (PyCFunction)THPVariable_data_ptr, METH_NOARGS, NULL},
   {"dim", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
+#ifdef BUILD_NAMEDTENSOR
+  {"has_names", (PyCFunction)THPVariable_has_names, METH_NOARGS, NULL},
+#endif
   {"double", (PyCFunction)THPVariable_double, METH_NOARGS, NULL},
   {"element_size", (PyCFunction)THPVariable_element_size, METH_NOARGS, NULL},
   {"float", (PyCFunction)THPVariable_float, METH_NOARGS, NULL},
diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp
index 999a593..3f72bb2 100644
--- a/torch/csrc/autograd/python_variable.cpp
+++ b/torch/csrc/autograd/python_variable.cpp
@@ -332,7 +332,7 @@
   THPObjectPtr tuple(PyTuple_New(size));
   if (!tuple) throw python_error();
 
-  if (!self->cdata.is_named()) {
+  if (!self->cdata.has_names()) {
     for (size_t i = 0; i < size; ++i) {
       PyTuple_SET_ITEM(tuple.get(), i, Py_None);
     }