[skip ci] Revert "Add comments for adding shape function and linting"

This is a technical revert of 6d36bbde7eb2eb0aed448f694338cb49c2ae47f3 to reconcile it with e50478c02592597f12b8490ec5496f76c7d8b8cc (which is the same change with the lint fixes applied).

This commit should be skipped during import.
diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp
index 72dcc79..d8159ad 100644
--- a/test/cpp/jit/test_misc.cpp
+++ b/test/cpp/jit/test_misc.cpp
@@ -2973,15 +2973,6 @@
   }
 }
 
-TEST(TestShapeGraphLinting, Basic) {
-  auto schemas = RegisteredShapeComputeSchemas();
-  for (const auto& schema : schemas) {
-    auto g = shapeComputeGraphForSchema(*schema);
-    TORCH_INTERNAL_ASSERT(g);
-    LintShapeComputeGraph(schema, *g);
-  }
-}
-
 // TODO: move to test_kernel when global settings are explicit
 // fusion parameters
 class Composed : public ::testing::Test {
diff --git a/test/jit/test_symbolic_shape_analysis.py b/test/jit/test_symbolic_shape_analysis.py
index e756cdb..cd25caa 100644
--- a/test/jit/test_symbolic_shape_analysis.py
+++ b/test/jit/test_symbolic_shape_analysis.py
@@ -12,7 +12,6 @@
 )
 from torch.testing._internal.common_utils import make_tensor
 from torch.testing._internal.jit_utils import JitTestCase, execWrapper
-from typing import List, Any
 
 if __name__ == '__main__':
     raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
@@ -499,37 +498,3 @@
         m2_shape = [20, 10]
         res = torch.jit._shapes.matmul(m1_shape, m2_shape)
         self.assertEqual(res, [10, 10])
-
-    def test_register_function_error_checking(self):
-        # this will error before registering on the global map, so
-        # there is no issue in overwriting schema mappings
-        @torch.jit.script
-        def foo(x, y):
-            return x + y
-
-        node = foo.graph.findNode("aten::add")
-
-        @torch.jit.script
-        def wrong_input_types(x, y):
-            x: List[int] = []
-            return x
-        with self.assertRaisesRegex(RuntimeError, "Expected supertype of int"):
-            torch._C._jit_register_shape_compute_graph_for_node(node, wrong_input_types.graph)
-
-        @torch.jit.script
-        def wrong_output_types(x: List[int], y: List[int]):
-            x: List[Tensor] = []
-            return x
-
-        with self.assertRaisesRegex(RuntimeError, "but got graph_type"):
-            torch._C._jit_register_shape_compute_graph_for_node(node, wrong_output_types.graph)
-
-        @torch.jit.script
-        def too_many_inputs(x: List[int], y: List[int], z: Any, z2: Any):
-            x: List[int] = []
-            return x
-
-        with self.assertRaises(RuntimeError) as error:
-            torch._C._jit_register_shape_compute_graph_for_node(node, too_many_inputs.graph)
-
-        self.assertTrue("fewer arguments than schema" in str(error.exception))
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index 25f1744..d2ab34b 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -170,19 +170,6 @@
             return DecompositionGraphForSchema(n->schema());
           })
       .def("_jit_pass_run_decompositions", RunDecompositions)
-      // using Node* here instead of a schema because a schema looked up
-      // and passed in from Python would have a different pointer than the
-      // schema that is used globally for caching
-      .def(
-          "_jit_register_shape_compute_graph_for_node",
-          [](Node* n, std::shared_ptr<Graph>& graph) {
-            if (n->maybeSchema()) {
-              const FunctionSchema& schema = n->schema();
-              RegisterShapeComputeGraphForSchema(schema, graph);
-            } else {
-              TORCH_INTERNAL_ASSERT(false, "Expected schema", n);
-            }
-          })
       .def("_jit_pass_propagate_shapes_on_graph", PropagateShapesOnGraph)
       .def(
           "_jit_pass_propagate_shapes_on_graph_and_build_compute",
diff --git a/torch/csrc/jit/runtime/shape_functions.h b/torch/csrc/jit/runtime/shape_functions.h
index b5c1b0d..7de7dad 100644
--- a/torch/csrc/jit/runtime/shape_functions.h
+++ b/torch/csrc/jit/runtime/shape_functions.h
@@ -343,7 +343,7 @@
 
 def batch_norm(
     input: List[int],
-    weight: Optional[List[int]],
+    weight: List[int],
     bias: Optional[List[int]],
     running_mean: Optional[List[int]],
     running_var: Optional[List[int]],
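For reference, a sketch of a shape function matching the restored signature. The body is an assumption based on aten::batch_norm being shape-preserving, and the trailing arguments follow the ATen schema rather than anything shown in this diff:

```python
from typing import List, Optional

def batch_norm(
    input: List[int],
    weight: List[int],
    bias: Optional[List[int]],
    running_mean: Optional[List[int]],
    running_var: Optional[List[int]],
    training: bool,
    momentum: float,
    eps: float,
    cudnn_enabled: bool,
) -> List[int]:
    # batch_norm preserves the input shape; return a fresh, unaliased copy
    out: List[int] = []
    for elem in input:
        out.append(elem)
    return out
```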
diff --git a/torch/csrc/jit/runtime/symbolic_shape_registry.cpp b/torch/csrc/jit/runtime/symbolic_shape_registry.cpp
index 4dd8c6b..db9d204 100644
--- a/torch/csrc/jit/runtime/symbolic_shape_registry.cpp
+++ b/torch/csrc/jit/runtime/symbolic_shape_registry.cpp
@@ -1,9 +1,6 @@
-#include <c10/util/Exception.h>
 #include <torch/csrc/jit/frontend/ir_emitter.h>
-#include <torch/csrc/jit/ir/ir_views.h>
 #include <torch/csrc/jit/jit_log.h>
 #include <torch/csrc/jit/passes/inliner.h>
-#include <torch/csrc/jit/runtime/graph_iterator.h>
 #include <torch/csrc/jit/runtime/operator.h>
 #include <torch/csrc/jit/runtime/symbolic_shape_registry.h>
 #include <torch/csrc/jit/runtime/symbolic_shape_registry_util.h>
@@ -163,140 +160,6 @@
   return at::nullopt;
 }
 
-TypePtr mapTensorToListOfInts(TypePtr type) {
-  if (type->cast<TensorType>()) {
-    return ListType::ofInts();
-  }
-  at::ArrayRef<TypePtr> contained = type->containedTypes();
-  if (contained.empty()) {
-    return type;
-  }
-  return type->withContained(
-      fmap(type->containedTypes(), mapTensorToListOfInts));
-}
-
-void checkForWhileLoop(
-    const FunctionSchema* schema,
-    std::shared_ptr<Graph> graph) {
-  DepthFirstGraphNodeIterator graph_it(graph);
-  for (auto* node = graph_it.next(); node != nullptr; node = graph_it.next()) {
-    if (node->kind() != prim::Loop) {
-      continue;
-    }
-    LoopView loop(node);
-    if (loop.loopType() != LoopView::For) {
-      TORCH_WARN(
-          "While loops are not yet implemented in unrolling which may make this shape function difficult to partially evaluate: ",
-          *node,
-          " for schema ",
-          *schema);
-    }
-  }
-}
-
-void checkInputReturnedAsOutput(
-    const FunctionSchema* schema,
-    const std::shared_ptr<Graph>& graph) {
-  // The alias db could be used here as well, but it is imprecise, so
-  // it would only be able to warn
-  for (size_t i : c10::irange(graph->inputs().size())) {
-    Value* input = graph->inputs().at(i);
-    for (size_t j : c10::irange(graph->outputs().size())) {
-      Value* output = graph->outputs().at(j);
-      TORCH_CHECK(
-          input != output,
-          "For schema: ",
-          *schema,
-          " input index ",
-          i,
-          " is returned as output index ",
-          j,
-          ". Shape functions must return new unaliased lists");
-    }
-  }
-}
-
-void checkInputAndOutputTypes(
-    const FunctionSchema* schema,
-    const std::shared_ptr<Graph>& graph) {
-  // allow extra unused trailing arguments so one graph can map to multiple schemas, e.g. unary ops
-  TORCH_CHECK(
-      graph->inputs().size() <= schema->arguments().size(),
-      "Shape function must have fewer arguments than schema. Got ",
-      graph->inputs().size(),
-      " graph arguments and ",
-      schema->arguments().size(),
-      " schema arguments of schema: ",
-      *schema);
-
-  for (auto i : c10::irange(graph->inputs().size())) {
-    auto inp_type = schema->arguments().at(i).type();
-    auto mapped_type = mapTensorToListOfInts(inp_type);
-    auto graph_type = graph->inputs().at(i)->type();
-    TORCH_INTERNAL_ASSERT(
-        mapped_type->isSubtypeOf(graph->inputs().at(i)->type()),
-        "For schema type: ",
-        inp_type->str(),
-        " Expected supertype of ",
-        mapped_type->str(),
-        " but got graph_type ",
-        graph_type->str(),
-        " at index ",
-        i,
-        " of schema: ",
-        *schema);
-  }
-
-  TORCH_CHECK(
-      graph->outputs().size() == schema->returns().size(),
-      "Shape function equal number of outputs as schema. Got ",
-      graph->outputs().size(),
-      " graph outputs and ",
-      schema->returns().size(),
-      " schema returns of schema: ",
-      *schema);
-
-  for (auto i : c10::irange(schema->returns().size())) {
-    auto out_type = schema->returns().at(i).type();
-    auto mapped_type = mapTensorToListOfInts(out_type);
-    auto graph_type = graph->outputs().at(i)->type();
-    TORCH_INTERNAL_ASSERT(
-        mapped_type->isSubtypeOf(graph->outputs().at(i)->type()),
-        "For schema type: ",
-        out_type->str(),
-        " Expected supertype of ",
-        mapped_type->str(),
-        " but got graph_type ",
-        graph_type->str(),
-        " at output index ",
-        i,
-        " of schema: ",
-        *schema);
-  }
-}
-
-void transformShapeFunction(
-    const FunctionSchema* schema_string,
-    std::shared_ptr<Graph> graph) {
-  Inline(*graph);
-
-  // ATen operators can return multiple unboxed values, in contrast to
-  // functions defined in TorchScript or user-registered operators,
-  // which must use a Tuple.
-  // Here, modify the shape graphs of ATen operators with multiple
-  // outputs so that the two correspond.
-  if (schema_string->returns().size() > 1) {
-    TORCH_INTERNAL_ASSERT(
-        graph->outputs().size() == 1 &&
-        graph->outputs().at(0)->node()->kind() == prim::TupleConstruct);
-    auto tuple_node = graph->outputs().at(0)->node();
-    graph->eraseOutput(0);
-    for (Value* v : tuple_node->inputs()) {
-      graph->registerOutput(v);
-    }
-  }
-}
-
 void registerSchema(
     const FunctionSchema* schema_string,
     const std::string& shape_compute_function_name,
@@ -317,11 +180,26 @@
       module.get_function(shape_compute_function_name);
   std::shared_ptr<Graph> graph =
       toGraphFunction(shape_compute_function).graph();
+  Inline(*graph);
 
-  transformShapeFunction(schema_string, graph);
-  // NB: the shape functions registered in source are linted
-  // in a test file
-  // LintShapeComputeGraph(schema_string, graph);
+  // ATen operators can return multiple unboxed values, in contrast to
+  // functions defined in TorchScript or user-registered operators,
+  // which must use a Tuple.
+  // Here, modify the shape graphs of ATen operators with multiple
+  // outputs so that the two correspond.
+  if (schema_string->returns().size() > 1) {
+    TORCH_INTERNAL_ASSERT(
+        graph->outputs().size() == 1 &&
+        graph->outputs().at(0)->node()->kind() == prim::TupleConstruct);
+    auto tuple_node = graph->outputs().at(0)->node();
+    graph->eraseOutput(0);
+    for (Value* v : tuple_node->inputs()) {
+      graph->registerOutput(v);
+    }
+  }
+  // allow extra unused trailing arguments so one graph can map to multiple schemas, e.g. unary ops
+  TORCH_INTERNAL_ASSERT(
+      graph->inputs().size() <= schema_string->arguments().size());
 
   cached_schema_to_graph[schema_string] = graph;
   reused_functions[shape_compute_function_name] = graph;
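To illustrate the tuple handling re-inlined above: a shape function for an operator with multiple returns is written in TorchScript as returning a tuple, and the registration code unpacks the prim::TupleConstruct into separate graph outputs. A hedged sketch using aten::max.dim as a plausible example (negative-dim normalization omitted):

```python
import torch
from typing import List, Tuple

# "aten::max.dim(Tensor self, int dim, bool keepdim=False)
#     -> (Tensor values, Tensor indices)"
# values and indices share a shape, so return two fresh lists.
@torch.jit.script
def max_dim_shape(self: List[int], dim: int, keepdim: bool) -> Tuple[List[int], List[int]]:
    values: List[int] = []
    for i in range(len(self)):
        if i != dim:
            values.append(self[i])
        elif keepdim:
            values.append(1)
    indices: List[int] = []
    for elem in values:
        indices.append(elem)
    return values, indices
```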
@@ -421,34 +299,8 @@
   if (cached_schema_to_graph.size() == 0) {
     loadFunctions();
   }
-  transformShapeFunction(&schema, g);
-  LintShapeComputeGraph(&schema, g);
-
   cached_schema_to_graph[&schema] = g;
 }
 
-std::vector<const FunctionSchema*> RegisteredShapeComputeSchemas() {
-  std::lock_guard<std::mutex> guard(lock);
-  if (cached_schema_to_graph.size() == 0) {
-    loadFunctions();
-  }
-
-  std::vector<const FunctionSchema*> schemas;
-  schemas.reserve(cached_schema_to_graph.size());
-  for (const auto& pair : cached_schema_to_graph) {
-    schemas.push_back(pair.first);
-  }
-  return schemas;
-}
-
-void LintShapeComputeGraph(
-    const FunctionSchema* schema,
-    const std::shared_ptr<Graph>& graph) {
-  checkInputAndOutputTypes(schema, graph);
-  checkForWhileLoop(schema, graph);
-  checkInputReturnedAsOutput(schema, graph);
-  // TODO: other checks ? list ops which we don't symbolically optimize, etc ?
-}
-
 } // namespace jit
 } // namespace torch
diff --git a/torch/csrc/jit/runtime/symbolic_shape_registry.h b/torch/csrc/jit/runtime/symbolic_shape_registry.h
index 657ac9a..f9625ae 100644
--- a/torch/csrc/jit/runtime/symbolic_shape_registry.h
+++ b/torch/csrc/jit/runtime/symbolic_shape_registry.h
@@ -8,44 +8,6 @@
 namespace torch {
 namespace jit {
 
-/*
-ADDING A NEW SHAPE GRAPH:
-- For one node schema, there is one corresponding registered shape compute
-graph. The schema of the graph should be the same except for Tensor arguments.
-For every Tensor input in the operator schema, there should be a List[int]
-corresponding to that Tensor's shape. For example, "aten::linear(Tensor input,
-Tensor weight, Tensor? bias=None) -> Tensor" maps to
-def linear(input: List[int], weight: List[int], bias: Optional[List[int]]).
-
-Additionally, arguments which are unused at the end of the schema may be left
-off. This allows sharing a single graph for multiple function schemas, such as
-unary operators with different trailing arguments that do not affect the output
-shape.
-
-The shape graph should return a new, unaliased List[int] (or tuple of lists for
-multiple returns) and should not modify any input lists. This allows the shape
-graphs to be composed and executed.
-
-The shape analysis (particularly for non-complete, or symbolic shapes) works by
-partially evaluating the JIT IR. It may be possible for a Graph to be registered
-that we cannot currently partially evaluate. If this happens, please file an
-issue. There are lints registered to avoid particular known patterns (continue
-or break or early return in a loop). Those may be improved in the future, please
-file an issue if necessary.
-
-To debug (and write initially) the recommended flow is to define these functions
-in python and iterate there. Functions in `shape_functions.h` and
-`shape_functions_1.h` should be executable in python.
-
-To test operators, the preferred flow is through OpInfos, with
-`assert_jit_shape_analysis=True`. If this is not feasible, you can look at tests
-in `test_symbolic_shape_analysis.py` such as `test_adaptive_avg_pool2d`.
-
-Operators which take in a list of tensors, such as concat, are not yet
-supported. Concat has been special cased and could be generalized as needed.
-Please file an issue.
-*/
-
 TORCH_API void RegisterShapeComputeGraphForSchema(
     const FunctionSchema& schema,
     std::shared_ptr<Graph> g);
@@ -53,11 +15,5 @@
 TORCH_API c10::optional<std::shared_ptr<Graph>> shapeComputeGraphForSchema(
     const FunctionSchema& schema);
 
-TORCH_API std::vector<const FunctionSchema*> RegisteredShapeComputeSchemas();
-
-TORCH_API void LintShapeComputeGraph(
-    const FunctionSchema* schema,
-    const std::shared_ptr<Graph>& graph);
-
 } // namespace jit
 } // namespace torch
diff --git a/torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp b/torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp
index eb4c6ee..71c9730 100644
--- a/torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp
+++ b/torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp
@@ -118,8 +118,7 @@
       {"aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", "broadcast_one_three"},
       // TODO: enable slice, shape inference is not implemented for this op yet
   };
- // clang-format on
- return tensorexpr_elementwise_set;
+  return tensorexpr_elementwise_set;
 }
 
 }