[aotinductor] Skip benchmarks with control flow (#109661)

Since AOTInductor doesn't support control flow yet, skip the benchmarks that currently fail because their model code contains control flow. Logs taken from https://hud.pytorch.org/benchmark/compilers?startTime=Tue%2C%2012%20Sep%202023%2022%3A56%3A40%20GMT&stopTime=Tue%2C%2019%20Sep%202023%2022%3A56%3A40%20GMT&granularity=hour&suite=torchbench&mode=inference&dtype=bfloat16&lBranch=main&lCommit=2c1554a0323107d821be3ff13df7833b9f0b960d&rBranch=main&rCommit=47be61e12bd51df27182343d312dc3df485d5559

The underlying errors are documented in https://github.com/pytorch/pytorch/issues/105217
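
For context, here is a minimal, self-contained sketch of the pattern this PR follows. Only `skip_models` and `skip_models_due_to_control_flow` come from the actual harness; the `TorchBenchRunner` subclass, its example skip set, and `configure_backend` are illustrative stand-ins:

```python
class BenchmarkRunner:
    def __init__(self):
        self.skip_models = set()

    @property
    def skip_models_due_to_control_flow(self):
        # Base-class default: nothing to skip; each suite overrides this.
        return set()


class TorchBenchRunner(BenchmarkRunner):
    @property
    def skip_models_due_to_control_flow(self):
        # Illustrative subset of the real torchbench list below.
        return {"hf_Reformer", "opacus_cifar10"}


def configure_backend(runner, backend):
    if backend == "aot_inductor":
        # AOTInductor can't handle data-dependent control flow yet,
        # so fold the suite's known-bad models into the global skip list.
        runner.skip_models.update(runner.skip_models_due_to_control_flow)


runner = TorchBenchRunner()
configure_backend(runner, "aot_inductor")
assert "hf_Reformer" in runner.skip_models
```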

Pull Request resolved: https://github.com/pytorch/pytorch/pull/109661
Approved by: https://github.com/desertfire
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index f2de0cf..f7830b1 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -1767,6 +1767,10 @@
         return set()
 
     @property
+    def skip_models_due_to_control_flow(self):
+        return set()
+
+    @property
     def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
         raise NotImplementedError()
 
@@ -3276,6 +3280,9 @@
             assert not args.training, "AOTInductor only supports inference"
             assert args.devices == ["cuda"], "AOTInductor only tested for CUDA"
             optimize_ctx = export_aot_inductor
+
+            # AOTInductor doesn't support control flow yet
+            runner.skip_models.update(runner.skip_models_due_to_control_flow)
         else:
             optimize_ctx = torch._dynamo.optimize(args.backend, nopython=args.nopython)
         experiment = speedup_experiment
diff --git a/benchmarks/dynamo/huggingface.py b/benchmarks/dynamo/huggingface.py
index db0c5ce..ddbe439 100755
--- a/benchmarks/dynamo/huggingface.py
+++ b/benchmarks/dynamo/huggingface.py
@@ -162,6 +162,8 @@
     "BlenderbotForCausalLM",
 }
 
+SKIP_DUE_TO_CONTROL_FLOW = {"AllenaiLongformerBase"}
+
 
 REQUIRE_HIGHER_TOLERANCE_TRAINING = {
     "MT5ForConditionalGeneration",
@@ -414,6 +416,10 @@
     def fp32_only_models(self):
         return FP32_ONLY_MODELS
 
+    @property
+    def skip_models_due_to_control_flow(self):
+        return SKIP_DUE_TO_CONTROL_FLOW
+
     def _get_model_cls_and_config(self, model_name):
         if model_name not in EXTRA_MODELS:
             model_cls = get_module_cls_by_model_name(model_name)
diff --git a/benchmarks/dynamo/torchbench.py b/benchmarks/dynamo/torchbench.py
index 7aec917..1eeb151 100755
--- a/benchmarks/dynamo/torchbench.py
+++ b/benchmarks/dynamo/torchbench.py
@@ -85,6 +85,17 @@
     "clip",
 }
 
+SKIP_DUE_TO_CONTROL_FLOW = {
+    "cm3leon_generate",
+    "detectron2_fcos_r_50_fpn",
+    "fastNLP_Bert",
+    "hf_Longformer",
+    "hf_Reformer",
+    "hf_T5_generate",
+    "opacus_cifar10",
+    "speech_transformer",
+}
+
 SKIP_FOR_CPU = {
     "hf_T5_generate",  # OOMs
     "cm3leon_generate",  # model is CUDA only
@@ -315,6 +326,10 @@
     def skip_multiprocess_models(self):
         return ONLY_MULTIPROCESS
 
+    @property
+    def skip_models_due_to_control_flow(self):
+        return SKIP_DUE_TO_CONTROL_FLOW
+
     def load_model(
         self,
         device,