graphics_dEQP.py: rerun failing tests after hasty mode

Tests that fail in hasty mode will be rerun in individual mode,
since some tests can break state for the tests that follow them and
cause spurious failures.

BUG=chromium:712299
TEST=test_that veyron_minnie graphics_dEQP.gles2-master.hasty.9
     (original: 1/1415, after rerun: 0/1415)

Change-Id: I43fc844ea7c12bb36b3f59ba5f83a81f8b2945fb
Reviewed-on: https://chromium-review.googlesource.com/502333
Commit-Ready: Po-Hsien Wang <pwang@chromium.org>
Tested-by: Ilja H. Friedel <ihf@chromium.org>
Reviewed-by: Ilja H. Friedel <ihf@chromium.org>
diff --git a/client/site_tests/graphics_dEQP/graphics_dEQP.py b/client/site_tests/graphics_dEQP/graphics_dEQP.py
index c89ed95..862c568 100644
--- a/client/site_tests/graphics_dEQP/graphics_dEQP.py
+++ b/client/site_tests/graphics_dEQP/graphics_dEQP.py
@@ -17,6 +17,8 @@
 from autotest_lib.client.cros import cros_logging, service_stopper
 from autotest_lib.client.cros.graphics import graphics_utils
 
+RERUN_RATIO = 0.02  # Rerun failures only if under this fraction of all tests
+
 
 class graphics_dEQP(test.test):
     """Run the drawElements Quality Program test suite.
@@ -562,6 +564,21 @@
             test_results = self.run_tests_hasty(test_cases, failing_test)
 
             logging.info("Failing Tests: %s", str(failing_test))
+            if len(failing_test) > 0:
+                if len(failing_test) < sum(test_results.values()) * RERUN_RATIO:
+                    logging.info("Because we are in hasty mode, we will rerun"
+                                 "the failing tests one at a time")
+                    rerun_results = self.run_tests_individually(failing_test)
+                    # Zero out hasty failure counts, then merge rerun results
+                    for result in test_results:
+                        if result.lower() not in self.TEST_RESULT_FILTER:
+                            test_results[result] = 0
+                    for result in rerun_results:
+                        test_results[result] = (test_results.get(result, 0) +
+                                                rerun_results[result])
+                else:
+                    logging.info("There are too many failing tests. It would "
+                                 "take too long to rerun them. Giving up.")
         else:
             logging.info('Running each test individually.')
             test_results = self.run_tests_individually(test_cases)
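
For reference, a minimal standalone sketch of the merge step in the
hunk above, which can be hard to read in diff form. It assumes
TEST_RESULT_FILTER holds the lowercased outcome names to keep from the
hasty run (e.g. 'pass'); the filter contents, the helper name
merge_rerun_results and the example counts are illustrative, not part
of this change.

    TEST_RESULT_FILTER = ['pass', 'notsupported', 'skipped']  # assumed contents

    def merge_rerun_results(test_results, rerun_results):
        """Replace hasty-mode failure counts with individual rerun counts."""
        # Zero out every outcome the filter does not keep: those tests
        # were rerun one at a time, so the rerun counts supersede them.
        for result in test_results:
            if result.lower() not in TEST_RESULT_FILTER:
                test_results[result] = 0
        # Fold the rerun outcomes back into the totals.
        for result, count in rerun_results.items():
            test_results[result] = test_results.get(result, 0) + count
        return test_results

    # 1414 tests passed hastily and 1 failed; rerunning it individually
    # turned the failure into a pass, matching the TEST= line above
    # (1/1415 failing before the rerun, 0/1415 after).
    print(merge_rerun_results({'Pass': 1414, 'Fail': 1}, {'Pass': 1}))
    # -> {'Pass': 1415, 'Fail': 0}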