Revert "Run all of the timm models shards in the periodic (#92743)"

This reverts commit de69cedf98ae578f26add662c6387a43cf098066.

Reverted https://github.com/pytorch/pytorch/pull/92743 on behalf of https://github.com/atalman because it needs to be landed after https://github.com/pytorch/pytorch/pull/92845 and https://github.com/pytorch/pytorch/pull/92846 land
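
For context, the test.sh hunks below remove a fail-soft idiom: each benchmark
invocation records its exit status with "|| exit_status=$?" instead of
aborting, and any nonzero status is echoed and returned at the end, so a
failure anywhere still fails the job. A minimal sketch of that idiom (the
function and step names are hypothetical, not the actual benchmark entry
points):

    # Sketch of the pattern being removed; names are illustrative only.
    run_all_checks() {
      local exit_status=0
      step_inference || exit_status=$?   # remember the failure, keep going
      step_training || exit_status=$?
      if [[ $exit_status -ne 0 ]]; then
        echo "Some benchmarks failed; scroll up for details"
      fi
      return $exit_status
    }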
diff --git a/.jenkins/pytorch/test.sh b/.jenkins/pytorch/test.sh
index 7bb6bca..b198469 100755
--- a/.jenkins/pytorch/test.sh
+++ b/.jenkins/pytorch/test.sh
@@ -294,18 +294,11 @@
 test_aot_eager_benchmark() {
   # Usage: test_aot_eager_benchmark huggingface 0
 
-  local exit_status=0
-
   # Check inference with --float32
-  test_single_dynamo_benchmark "aot_eager_inference" "$@" --backend aot_eager || exit_status=$?
+  test_single_dynamo_benchmark "aot_eager_inference" "$@" --backend aot_eager
 
   # Check training with --amp
-  test_single_dynamo_benchmark "aot_eager_training" "$@" --backend aot_eager --training --amp || exit_status=$?
-
-  if [[ $exit_status -ne 0 ]]; then
-    echo "Some benchmarks failed; scroll up for details"
-  fi
-  return $exit_status
+  test_single_dynamo_benchmark "aot_eager_training" "$@" --backend aot_eager --training --amp
 }
 
 test_inductor_benchmark() {
@@ -350,18 +343,13 @@
 
 # No sharding for the periodic job; we don't care if latency is bad
 test_aot_eager_all() {
-  local exit_status=0
-  PYTHONPATH=$(pwd)/torchbench test_aot_eager_benchmark torchbench "" || exit_status=$?
-  test_aot_eager_benchmark huggingface "" || exit_status=$?
-  test_aot_eager_benchmark timm_models "" || exit_status=$?
-  if [[ $exit_status -ne 0 ]]; then
-    echo "Some benchmarks failed; scroll up for details"
-  fi
-  return $exit_status
+  PYTHONPATH=$(pwd)/torchbench test_aot_eager_benchmark torchbench 0
+  test_aot_eager_benchmark huggingface 0
+  test_aot_eager_benchmark timm_models 0
 }
 
 test_inductor_huggingface() {
-  test_inductor_benchmark huggingface ""
+  test_inductor_benchmark huggingface 0
 }
 
 test_inductor_huggingface_perf() {
@@ -385,7 +373,7 @@
 }
 
 test_inductor_torchbench() {
-  PYTHONPATH=$(pwd)/torchbench test_inductor_benchmark torchbench ""
+  PYTHONPATH=$(pwd)/torchbench test_inductor_benchmark torchbench 0
 }
 
 test_inductor_torchbench_perf() {
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index cccf77a..f87ce8b 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -86,9 +86,7 @@
     "detectron2_maskrcnn_r_101_fpn",
     "detectron2_maskrcnn_r_50_c4",
     "detectron2_maskrcnn_r_50_fpn",
-    "moco",  # Please convert all Tensors to FakeTensors first
     "hf_BigBird",  # OOM
-    "tacotron2",  # AssertionError: Deduped args out of bounds
     # Huggingface
     "BartForConditionalGeneration",  # OOM
     "DebertaV2ForQuestionAnswering",  # OOM
@@ -103,6 +101,7 @@
     "resnet50_quantized_qat",  # fp64_OOM
     "moco",
     "pytorch_struct",
+    "tacotron2",  # AssertionError: Deduped args out of bounds
     "vision_maskrcnn",
     # Huggingface
     "MBartForConditionalGeneration",  # OOM