Move sigmoid run_const_graph HOP to PyTorch core (#132526)

Summary: When HOPs live out of tree, it becomes impossible to make breaking changes to the HOP API, yet HOP implementations are deeply entwined with PyTorch internals. Move the HOP into the PyTorch tree so that such changes are possible.
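
For reference, a minimal sketch of calling the HOP once it is in tree. The toy module, graph, and weights below are illustrative only and not part of this change; direct eager invocation is assumed to route through the Autograd and CPU registrations added by this diff:

    import torch
    from torch._higher_order_ops.run_const_graph import run_const_graph

    class ConstGraph(torch.nn.Module):
        def forward(self, w):
            # Toy stand-in for a constant-computing graph.
            return (w * 2,)

    const_gm = torch.fx.symbolic_trace(ConstGraph())
    weights = (torch.randn(3),)

    # Dispatches through the Autograd registration (no backward support) down
    # to the DispatchKey.CPU impl, which simply runs const_gm(*weights).
    out = run_const_graph(const_gm, weights)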

Test Plan: Sandcastle and OSS CI

Differential Revision: D60674861

Pull Request resolved: https://github.com/pytorch/pytorch/pull/132526
Approved by: https://github.com/SherlockNoMad
diff --git a/torch/_higher_order_ops/run_const_graph.py b/torch/_higher_order_ops/run_const_graph.py
new file mode 100644
index 0000000..317e772
--- /dev/null
+++ b/torch/_higher_order_ops/run_const_graph.py
@@ -0,0 +1,60 @@
+# mypy: allow-untyped-defs
+import torch
+from torch._C import DispatchKey
+from torch._higher_order_ops.utils import autograd_not_implemented
+from torch._ops import HigherOrderOperator
+from torch._subclasses.fake_tensor import FakeTensorMode
+from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
+from torch.utils import _pytree as pytree
+
+
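+# run_const_graph(const_gm, weights) executes the given torch.fx.GraphModule
+# on `weights`, with dedicated handling for proxy tracing, functionalization,
+# fake tensors, and eager CPU execution registered below.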
+run_const_graph = HigherOrderOperator("run_const_graph")
+
+
+@run_const_graph.py_impl(ProxyTorchDispatchMode)
+def run_const_graph_dispatch_mode(mode, *args):
+    if not mode.enable_tracing:
+        return run_const_graph(*args)
+    const_gm, weights = args
+    p_args = pytree.tree_map(mode.tracer.unwrap_proxy, args)
+    assert isinstance(const_gm, torch.fx.GraphModule)
+    assert not hasattr(mode.tracer.root, "_const_graph")
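+    # Register the constant graph as a submodule of the traced root.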
+    mode.tracer.root.register_module("_const_graph", const_gm)
+
+    proxy = mode.tracer.create_proxy("call_function", run_const_graph, p_args, {})
+
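+    # Run the constant graph for real and associate its outputs with the proxy node.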
+    out = const_gm(*weights)
+    return track_tensor_tree(out, proxy, constant=None, tracer=mode.tracer)
+
+
+@run_const_graph.py_functionalize_impl
+def run_const_graph_functional(ctx, *args):
+    unwrapped_args = ctx.unwrap_tensors(args)
+
+    with ctx.redispatch_to_next():
+        out = run_const_graph(*unwrapped_args)
+        return ctx.wrap_tensors(out)
+
+
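+# Backward is not implemented; with deferred_error=True the failure surfaces
+# only if a gradient is actually requested.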
+run_const_graph.py_impl(DispatchKey.Autograd)(
+    autograd_not_implemented(run_const_graph, deferred_error=True)
+)
+
+
+@run_const_graph.py_impl(FakeTensorMode)
+def run_const_graph_fake_tensor_mode(mode, graph, args):
+    assert isinstance(graph, torch.fx.GraphModule)
+    with mode:
+        return graph(*args)
+
+
+@run_const_graph.py_impl(DispatchKey.CPU)
+def run_const_graph_cpu(graph, args):
+    assert isinstance(graph, torch.fx.GraphModule)
+    return graph(*args)