Fix printing regular tensors inside functorch transforms (#85556)

Fixes https://github.com/pytorch/functorch/issues/1026

We need to disable functorch's stack-based dispatching mechanism during
tensor printing. Otherwise, every operation used to prepare the Tensor's
data for printing dispatches through the entire functorch stack and
causes problems.
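
For context, a minimal repro sketch of the failure mode (essentially the
pattern exercised by the new test below; `x` and `f` are illustrative names):

```python
import torch
from functorch import vjp

x = torch.tensor([1., 2., 3.])  # a regular, non-wrapped tensor captured by f

def f(y):
    repr(x)  # before this fix, formatting x dispatched through the functorch stack
    return y

vjp(f, torch.randn(4))
```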

Disabling stack-based dispatching while printing a functorch-wrapped
tensor is not a problem: we can still read the attributes of the
wrapped tensor that we need.
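
For comparison, a hedged sketch of printing a wrapped tensor inside a
transform, which keeps working because formatting only reads attributes
of the wrapper (uses functorch's vmap; names are illustrative):

```python
import torch
from functorch import vmap

def f(x):
    # x is a BatchedTensor here; its repr shows the unwrapped value plus the
    # batching metadata and does not need stack-based dispatching.
    print(x)
    return x.sum()

vmap(f)(torch.randn(3, 4))
```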

Test Plan:
- new test
Pull Request resolved: https://github.com/pytorch/pytorch/pull/85556
Approved by: https://github.com/samdow
diff --git a/functorch/test/test_eager_transforms.py b/functorch/test/test_eager_transforms.py
index 88ce2ed..2b2aebf 100644
--- a/functorch/test/test_eager_transforms.py
+++ b/functorch/test/test_eager_transforms.py
@@ -816,6 +816,18 @@
                 expected = expected.replace("\n", "").replace("  ", "")
                 self.assertEqual(expected, buf)
 
+    def test_print_captured_tensor_inside_transform(self, device):
+        x = torch.tensor([1., 2., 3.], device=device)
+        out = None
+
+        def f(y):
+            nonlocal out
+            out = repr(x)
+            return y
+
+        vjp(f, torch.randn(4, device=device))
+        self.assertEqual(out, repr(x))
+
     def test_no_grad_outside(self, device):
         x = torch.randn([], device=device, requires_grad=True)
         with torch.no_grad():
diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py
index 49f152b..986be67 100644
--- a/torch/_tensor_str.py
+++ b/torch/_tensor_str.py
@@ -610,18 +610,7 @@
         torch._sync(tensor)
 
     value = torch._C._functorch.get_unwrapped(tensor)
-    dl_enabled = torch._C._are_functorch_transforms_active()
-    try:
-        # Disable temporarily FuncTorchDynamicLayerFrontMode and
-        # FuncTorchDynamicLayerBackMode as included dispatch keys
-        if dl_enabled:
-            torch._C._functorch._set_dynamic_layer_keys_included(False)
-        value_repr = repr(value)
-    finally:
-        # Reenable FuncTorchDynamicLayerFrontMode and
-        # FuncTorchDynamicLayerBackMode as included dispatch keys
-        if dl_enabled:
-            torch._C._functorch._set_dynamic_layer_keys_included(True)
+    value_repr = repr(value)
 
     indented_value_repr = textwrap.indent(value_repr, " " * 4)
     if torch._C._functorch.is_batchedtensor(tensor):
@@ -644,4 +633,5 @@
 
 def _str(self, *, tensor_contents=None):
     with torch.no_grad():
+        guard = torch._C._DisableFuncTorch()
         return _str_intern(self, tensor_contents=tensor_contents)