Delete Tensor constructor from TensorOptions. (#11101)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11101

I'd like to invert the dependency between Tensor and TensorOptions
(such that Tensor includes TensorOptions); to do this, I'd prefer
there to not be a TensorOptions constructor that takes a Tensor.  Eventually, all references
to Tensor will disappear from TensorOptions.h

Reviewed By: cpuhrsch

Differential Revision: D9585627

fbshipit-source-id: dd4a28b2c06b1e55f629762915f03c2b6c34d840
diff --git a/aten/src/ATen/TensorOptions.h b/aten/src/ATen/TensorOptions.h
index 9788a13..161d771 100644
--- a/aten/src/ATen/TensorOptions.h
+++ b/aten/src/ATen/TensorOptions.h
@@ -63,18 +63,6 @@
   /// - requires_grad: false
   explicit TensorOptions(bool use_thread_local_default_options);
 
-  /// Constructs the `TensorOptions` from the type of the given `Tensor`.
-  /// If the `Tensor` has a CUDA type, the `device_index` will match that of the
-  /// tensor. The `requires_grad` property of the tensor is ignored and set to
-  /// false in the created `TensorOptions`.  See the constructor from `Type` for
-  /// the semantics w.r.t. the `type()` method.
-  explicit TensorOptions(Tensor tensor) {
-    this->dtype(tensor.dtype());
-    this->device(tensor.device());
-    this->layout(tensor.layout());
-    this->is_variable(tensor.is_variable());
-  }
-
   /// Constructs the `TensorOptions` from a type and a `device_index`.
   /* implicit */ TensorOptions(
       const Type& type,
@@ -244,7 +232,10 @@
 
 /// From Tensor.h
 inline TensorOptions Tensor::options() const {
-  return TensorOptions(*this);
+  return TensorOptions().dtype(dtype())
+                        .device(device())
+                        .layout(layout())
+                        .is_variable(is_variable());
 }
 
 namespace detail {
diff --git a/test/cpp/api/tensor_options.cpp b/test/cpp/api/tensor_options.cpp
index fc694c3..7061e28 100644
--- a/test/cpp/api/tensor_options.cpp
+++ b/test/cpp/api/tensor_options.cpp
@@ -70,19 +70,19 @@
 }
 
 TEST_CASE("TensorOptions/ConstructsWellFromCPUTensors") {
-  auto options = TensorOptions(empty(5, kDouble));
+  auto options = empty(5, kDouble).options();
   REQUIRE_OPTIONS(kCPU, -1, kDouble, kStrided);
 
-  options = TensorOptions(empty(5, getNonVariableType(Backend::SparseCPU, kByte)));
+  options = empty(5, getNonVariableType(Backend::SparseCPU, kByte)).options();
   REQUIRE_OPTIONS(kCPU, -1, kByte, kSparse);
 }
 
 TEST_CASE("TensorOptions/ConstructsWellFromVariables") {
-  auto options = TensorOptions(torch::empty(5));
+  auto options = torch::empty(5).options();
   REQUIRE_OPTIONS(kCPU, -1, kFloat, kStrided);
   REQUIRE(!options.requires_grad());
 
-  options = TensorOptions(torch::empty(5, at::requires_grad()));
+  options = torch::empty(5, at::requires_grad()).options();
   REQUIRE_OPTIONS(kCPU, -1, kFloat, kStrided);
   REQUIRE(!options.requires_grad());
 }
diff --git a/test/cpp/api/tensor_options_cuda.cpp b/test/cpp/api/tensor_options_cuda.cpp
index ff3780a..a5cc951 100644
--- a/test/cpp/api/tensor_options_cuda.cpp
+++ b/test/cpp/api/tensor_options_cuda.cpp
@@ -42,10 +42,10 @@
 }
 
 TEST_CASE("TensorOptions/ConstructsWellFromCUDATensors", "[multi-cuda]") {
-  auto options = TensorOptions(empty(5, device(kCUDA).dtype(kDouble)));
+  auto options = empty(5, device(kCUDA).dtype(kDouble)).options();
   REQUIRE_OPTIONS(kCUDA, 0, kDouble, kStrided);
 
-  options = TensorOptions(empty(5, getNonVariableType(Backend::SparseCUDA, kByte)));
+  options = empty(5, getNonVariableType(Backend::SparseCUDA, kByte)).options();
   REQUIRE_OPTIONS(kCUDA, 0, kByte, kSparse);
 
   if (at::globalContext().getNumGPUs() > 1) {
@@ -54,14 +54,14 @@
       DeviceGuard guard(1);
       tensor = empty(5, device(kCUDA));
     }
-    options = TensorOptions(tensor);
+    options = tensor.options();
     REQUIRE_OPTIONS(kCUDA, 1, kFloat, kStrided);
 
     {
       DeviceGuard guard(1);
       tensor = empty(5, device(kCUDA).layout(kSparse));
     }
-    options = TensorOptions(tensor);
+    options = tensor.options();
     REQUIRE_OPTIONS(kCUDA, 1, kFloat, kSparse);
   }
 }
diff --git a/tools/autograd/gen_variable_factories.py b/tools/autograd/gen_variable_factories.py
index 64da273..efb8fbb 100644
--- a/tools/autograd/gen_variable_factories.py
+++ b/tools/autograd/gen_variable_factories.py
@@ -57,7 +57,8 @@
         actuals.append(actual)
     requires_grad = "options.requires_grad()" if has_tensor_options else "false"
     if decl['name'].endswith('_like') and not has_tensor_options:
-        actuals.append('at::TensorOptions({}).is_variable(false)'.format(actuals[0]))
+        # it's a tensor
+        actuals.append('{}.options().is_variable(false)'.format(actuals[0]))
 
     pre_record_trace, post_record_trace = format_trace(decl)