Don't specialize when indexing by SymInt (#99123)
Fixes https://github.com/pytorch/pytorch/issues/99091
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/99123
Approved by: https://github.com/msaroufim
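
Why indexing by a SymInt used to specialize: `a[i]` routes the index through `__index__`, which converts the symbolic int to a concrete Python int and guards on it, so every new input size compiles a fresh graph (the recompile-per-size behavior reported in #99091). The fix rewrites `getitem` with a `SymNodeVariable` index as `torch.select(a, 0, i)`, which takes the index as an ordinary argument the tracer can keep symbolic. A minimal sketch (not part of the patch) of why the rewrite is safe for the single-integer-index case:

```python
import torch

# Sketch only: a[i] and torch.select(a, 0, i) agree for an integer
# index i. select receives the index as a plain argument rather than
# calling __index__ on it, so a SymInt index can stay symbolic.
a = torch.randn(5)
for i in range(5):
    assert torch.equal(a[i], torch.select(a, 0, i))
```

The new test below drives the compiled function at sizes 3 through 11 and asserts that only a single frame is compiled.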
diff --git a/test/dynamo/test_subgraphs.py b/test/dynamo/test_subgraphs.py
index 41a2116..e63d6ef 100644
--- a/test/dynamo/test_subgraphs.py
+++ b/test/dynamo/test_subgraphs.py
@@ -379,6 +379,19 @@
         # just one graph now rather than 10
         self.assertEqual(cnt_dynamic.frame_count, 1)
 
+    @patch("torch._dynamo.config.dynamic_shapes", True)
+    @patch("torch._dynamo.config.assume_static_by_default", False)
+    def test_dynamic_getitem(self):
+        def fn(a, b):
+            return a[b.size(0) - 1]
+
+        cnt = torch._dynamo.testing.CompileCounter()
+        opt_fn = torch._dynamo.optimize(cnt)(fn)
+        for i in range(3, 12):
+            opt_fn(torch.randn(i), torch.randn(i))
+        # just one graph
+        self.assertEqual(cnt.frame_count, 1)
+
     def test_dynamic_kwarg(self):
         def fn(a, b):
             return a - b * 10
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index 775274e..8829efe 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -479,6 +479,16 @@
                     # Work around weird bug in hf_T5
                     fn, args = operator.add, [args[1], args[0]]
 
+                if self.fn is operator.getitem and isinstance(args[1], SymNodeVariable):
+                    # Standard indexing will force specialization due to
+                    # __index__. Rewrite as a regular torch op which will
+                    # trace fine
+                    fn, args = torch.select, [
+                        args[0],
+                        variables.ConstantVariable(0),
+                        args[1],
+                    ]
+
                 proxy = tx.output.create_proxy(
                     "call_function",
                     fn,