Revert "[Reland] Add sym_size/stride/numel/storage_offset to native_function.… (#100749)"

This reverts commit bb454891ed5ce97f580ae52e20f8e9ff2d0f3bf5.
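
For context, the wrappers removed from aten/src/ATen/native/TensorProperties.cpp below forward directly to the corresponding at::Tensor accessors, which exist independently of the native_functions.yaml entries. A minimal sketch of those accessors in use (illustrative only; assumes a recent libtorch, and the commented values assume a contiguous 2x3 tensor):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      at::Tensor t = at::randn({2, 3});
      // The sym_* accessors return c10::SymInt; for a tensor with concrete
      // (non-symbolic) sizes they compare and print like plain integers.
      std::cout << t.sym_size(0) << "\n";           // 2
      std::cout << t.sym_stride(1) << "\n";         // 1
      std::cout << t.sym_numel() << "\n";           // 6
      std::cout << t.sym_storage_offset() << "\n";  // 0
      return 0;
    }
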
diff --git a/aten/src/ATen/core/function_schema.cpp b/aten/src/ATen/core/function_schema.cpp
index 6e119ae..7463e28 100644
--- a/aten/src/ATen/core/function_schema.cpp
+++ b/aten/src/ATen/core/function_schema.cpp
@@ -19,9 +19,6 @@
 }
 
 FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const {
-  auto alwaysCloneWithRealTypes = [&](const Argument& a) {
-    return a.cloneWithType(a.real_type());
-  };
   auto cloneWithRealTypes = [&](const Argument& a) {
     if (with_symint) {
       return a.cloneWithType(a.real_type());
@@ -42,8 +39,7 @@
   };
   std::vector<Argument> new_arguments, new_returns;
   std::transform(arguments().begin(), arguments().end(), std::back_inserter(new_arguments), cloneWithRealTypes);
-  // NB: SymInt returns are always SymInt
-  std::transform(returns().begin(), returns().end(), std::back_inserter(new_returns), alwaysCloneWithRealTypes);
+  std::transform(returns().begin(), returns().end(), std::back_inserter(new_returns), cloneWithRealTypes);
   return FunctionSchema(
     name(),
     overload_name(),
diff --git a/aten/src/ATen/native/TensorProperties.cpp b/aten/src/ATen/native/TensorProperties.cpp
index d989a4f..e37dbf5 100644
--- a/aten/src/ATen/native/TensorProperties.cpp
+++ b/aten/src/ATen/native/TensorProperties.cpp
@@ -49,22 +49,6 @@
   return self.stride(dim);
 }
 
-c10::SymInt sym_size(const Tensor& self, int64_t dim) {
-  return self.sym_size(dim);
-}
-
-c10::SymInt sym_stride(const Tensor& self, int64_t dim) {
-  return self.sym_stride(dim);
-}
-
-c10::SymInt sym_numel(const Tensor& self) {
-  return self.sym_numel();
-}
-
-c10::SymInt sym_storage_offset(const Tensor& self) {
-  return self.sym_storage_offset();
-}
-
 int64_t size(const Tensor& self, Dimname dim) {
   size_t pos_dim = dimname_to_position(self, dim);
   return self.sizes()[pos_dim];
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index aae47a3..3392b00 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -5115,27 +5115,6 @@
   device_check: NoCheck
   device_guard: False
 
-- func: sym_size.int(Tensor self, int dim) -> SymInt
-  variants: function
-  device_check: NoCheck
-  device_guard: False
-  tags: core
-  manual_cpp_binding: True
-
-- func: sym_numel(Tensor self) -> SymInt
-  variants: function
-  device_check: NoCheck
-  device_guard: False
-  tags: core
-  manual_cpp_binding: True
-
-- func: sym_storage_offset(Tensor self) -> SymInt
-  variants: function
-  device_check: NoCheck
-  device_guard: False
-  tags: core
-  manual_cpp_binding: True
-
 - func: slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
   variants: function, method
   device_check: NoCheck
@@ -5411,13 +5390,6 @@
   device_check: NoCheck
   device_guard: False
 
-- func: sym_stride.int(Tensor self, int dim) -> SymInt
-  variants: function
-  device_check: NoCheck
-  device_guard: False
-  tags: core
-  manual_cpp_binding: True
-
 - func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
   device_check: NoCheck   # TensorIterator
   variants: function, method
diff --git a/test/functorch/test_vmap_registrations.py b/test/functorch/test_vmap_registrations.py
index 9dd52aa..11cb2e2 100644
--- a/test/functorch/test_vmap_registrations.py
+++ b/test/functorch/test_vmap_registrations.py
@@ -262,10 +262,6 @@
     "aten::subtract_.Scalar",
     "aten::subtract_.Tensor",
     "aten::svd.U",
-    "aten::sym_size.int",
-    "aten::sym_stride.int",
-    "aten::sym_numel",
-    "aten::sym_storage_offset",
     "aten::tensor_split.indices",
     "aten::tensor_split.sections",
     "aten::tensor_split.tensor_indices_or_sections",
diff --git a/tools/autograd/gen_python_functions.py b/tools/autograd/gen_python_functions.py
index 211aaca..f64b5f3 100644
--- a/tools/autograd/gen_python_functions.py
+++ b/tools/autograd/gen_python_functions.py
@@ -89,10 +89,6 @@
     "is_sparse_csr",
     "size",
     "stride",
-    "sym_size",
-    "sym_stride",
-    "sym_storage_offset",
-    "sym_numel",
     ".*_backward",
     ".*_backward_(out|input|weight|bias)",
     ".*_forward",
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py
index 58d5231..29fb7a5 100644
--- a/torch/_dynamo/eval_frame.py
+++ b/torch/_dynamo/eval_frame.py
@@ -918,21 +918,10 @@
         ):
             super().__init__(m)
             arg_len = len(flat_args)
-            self.new_args = []
-            for i in range(0, arg_len):
-                arg = super(ChangeInputOutputSignature, self).placeholder(
-                    f"arg{i}", (), {}
-                )
-                # Fill node.meta["val"] with fake tensor from the input,
-                # if it's not found in matched_input_elements_positions
-                if (
-                    i not in matched_input_elements_positions
-                    and fake_mode is not None
-                    and isinstance(flat_args[i], torch.Tensor)
-                ):
-                    arg.node.meta["val"] = fake_mode.from_tensor(flat_args[i])
-                self.new_args.append(arg)
-
+            self.new_args = [
+                super(ChangeInputOutputSignature, self).placeholder(f"arg{i}", (), {})
+                for i in range(0, arg_len)
+            ]
             self.old_args_gen = (
                 self.new_args[i] for i in matched_input_elements_positions
             )
diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp
index 3d3a932..350ef5e 100644
--- a/torch/csrc/jit/runtime/register_prim_ops.cpp
+++ b/torch/csrc/jit/runtime/register_prim_ops.cpp
@@ -416,6 +416,16 @@
         sym_size,
         aliasAnalysisFromSchema()),
     OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::sym_size.int(Tensor self, int dim) -> SymInt"),
+        sym_size_int,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::sym_stride.int(Tensor self, int dim) -> SymInt"),
+        sym_stride_int,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::stride(Tensor self) -> int[]"),
         [](Stack& stack) {
           at::Tensor arg = pop(stack).toTensor();
@@ -423,6 +433,15 @@
         },
         aliasAnalysisFromSchema()),
     OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("aten::sym_numel(Tensor self) -> SymInt"),
+        sym_numel,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA(
+            "aten::sym_storage_offset(Tensor self) -> SymInt"),
+        sym_storage_offset,
+        aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sym_stride(Tensor self) -> SymInt[]"),
         sym_stride,
         aliasAnalysisFromSchema()),
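
The handlers referenced in the restored registrations above (sym_size_int, sym_stride_int, sym_numel, sym_storage_offset) are defined elsewhere in register_prim_ops.cpp and are not part of this hunk. As a rough sketch of the stack calling convention they follow (mirroring the inline lambda registered for aten::stride above; the function name and includes here are assumptions, not the actual definitions):

    #include <ATen/core/Tensor.h>
    #include <ATen/core/stack.h>

    // Sketch only: arguments are popped in reverse order, and the
    // c10::SymInt result is pushed back onto the interpreter stack.
    void sym_size_int_sketch(torch::jit::Stack& stack) {
      const int64_t dim = torch::jit::pop(stack).toInt();        // second argument: dim
      const at::Tensor self = torch::jit::pop(stack).toTensor(); // first argument: self
      torch::jit::push(stack, self.sym_size(dim));
    }
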
diff --git a/torchgen/api/cpp.py b/torchgen/api/cpp.py
index 605e058..d921f83 100644
--- a/torchgen/api/cpp.py
+++ b/torchgen/api/cpp.py
@@ -226,9 +226,7 @@
 # and a function with a return type of 'std::tuple' has >1 return name.
 def returntype_type(t: Type, *, mutable: bool, symint: bool = False) -> CType:
     # placeholder is ignored
-    # NB: symint is ALWAYS respected for return types.  So symint argument
-    # here is IGNORED
-    r = valuetype_type(t, binds="__placeholder__", symint=True)
+    r = valuetype_type(t, binds="__placeholder__", symint=symint)
     if r is not None:
         return r.type
 
@@ -251,7 +249,7 @@
         assert (
             not mutable
         ), "Native functions should never return a mutable tensor list. They should return void."
-        elem = returntype_type(t.elem, mutable=False)
+        elem = returntype_type(t.elem, mutable=False, symint=symint)
         assert t.size is None, f"fixed size list returns not supported: {t}"
         return VectorCType(elem)
 
diff --git a/torchgen/api/types/signatures.py b/torchgen/api/types/signatures.py
index 3af5d9c..61a454d 100644
--- a/torchgen/api/types/signatures.py
+++ b/torchgen/api/types/signatures.py
@@ -35,8 +35,6 @@
     # Is this a symint C++ signature.  For BC reasons, functions that take
     # SymInts still present as int64_t in C++, and the SymInt variant is
     # offered at a different overload name
-    #
-    # NB: If a function RETURNS a SymInt, this is ALWAYS false
     symint: bool
 
     # The set of C++ arguments which should not have defaults applied to them
diff --git a/torchgen/model.py b/torchgen/model.py
index 607e88e..6772db1 100644
--- a/torchgen/model.py
+++ b/torchgen/model.py
@@ -1651,7 +1651,9 @@
         return self.kind() in [SchemaKind.inplace, SchemaKind.out, SchemaKind.mutable]
 
     def has_symint(self) -> bool:
-        return self.arguments.has_symint_arg()
+        return self.arguments.has_symint_arg() or any(
+            r.type.is_symint_like() for r in self.returns
+        )
 
     def __str__(self) -> str:
         all_arguments_str = str(self.arguments)