cheets_CTS_N: fix non-termination of while loop.

I clearly wrote bad code:
        while steps < self._max_retry and failed > 0:
            if failed > waived:
To fix this, merge the two conditions into a single while condition.
Also move all step increments to immediately after the while statements,
so that termination is obvious in all other cases.
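
For illustration, a minimal standalone sketch of the two loop shapes
(buggy_retry, fixed_retry and the failed -= 1 decrement are hypothetical
stand-ins for the real retry logic, not code from this change):

    def buggy_retry(failed, waived, max_retry=10):
        # Old shape: if 0 < failed <= waived, the inner guard is never
        # true, steps is never incremented, and the loop never exits.
        steps = 0
        while steps < max_retry and failed > 0:
            if failed > waived:
                steps += 1
                failed -= 1  # stand-in for one retry pass

    def fixed_retry(failed, waived, max_retry=10):
        # New shape: merged condition, increment first, so the loop runs
        # at most max_retry times in every case.
        steps = 0
        while steps < max_retry and failed > waived:
            steps += 1
            failed -= 1  # stand-in for one retry pass
        return steps

    assert fixed_retry(failed=2, waived=5) == 0    # only waived failures: no spin
    assert fixed_retry(failed=12, waived=0) == 10  # bounded by max_retry

The other retry loops get the same treatment: steps += 1 moves to
directly under the while, ahead of the _login_chrome() context manager,
so every iteration increments the counter unconditionally.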

BUG=chromium:705633
TEST=pylint
     cheets_CTS_N.7.1_r3.arm.CtsKeystoreTestCases
     cheets_CTS_N.7.1_r3.x86.CtsKeystoreTestCases
     (waivers applied, no hang)

Change-Id: I8734b5543ea2a69e07a59224f25792c7a761c592
Reviewed-on: https://chromium-review.googlesource.com/462418
Tested-by: Ilja H. Friedel <ihf@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
diff --git a/server/site_tests/cheets_CTS/cheets_CTS.py b/server/site_tests/cheets_CTS/cheets_CTS.py
index 5218a60..61252cd 100644
--- a/server/site_tests/cheets_CTS/cheets_CTS.py
+++ b/server/site_tests/cheets_CTS/cheets_CTS.py
@@ -315,6 +315,7 @@
 
         # Unconditionally run CTS package until we see some tests executed.
         while steps < self._max_retry and total_tests == 0:
+            steps += 1
             with self._login_chrome():
                 self._ready_arc()
 
@@ -360,7 +361,6 @@
                 # works on local failures.
                 total_tests = tests
                 total_passed = passed
-                steps += 1
             # The DUT has rebooted at this point and is in a clean state.
         if total_tests == 0:
             raise error.TestFail('Error: Could not find any tests in package.')
@@ -370,8 +370,8 @@
         while steps < self._max_retry and (notexecuted > 0 or failed > 0):
             # First retry until no tests are left that were not executed.
             while notexecuted > 0 and steps < self._max_retry:
+                steps += 1
                 with self._login_chrome():
-                    steps += 1
                     self._ready_arc()
                     logging.info('Continuing session %d:', session_id)
                     # 'Continue' reports as passed all passing results in the
diff --git a/server/site_tests/cheets_CTS_N/cheets_CTS_N.py b/server/site_tests/cheets_CTS_N/cheets_CTS_N.py
index 3bc1302..ae47dfa 100644
--- a/server/site_tests/cheets_CTS_N/cheets_CTS_N.py
+++ b/server/site_tests/cheets_CTS_N/cheets_CTS_N.py
@@ -318,6 +318,7 @@
 
         # Unconditionally run CTS module until we see some tests executed.
         while total_tests == 0 and steps < self._max_retry:
+            steps += 1
             with self._login_chrome():
                 self._ready_arc()
 
@@ -365,7 +366,6 @@
                 if total_tests == 0:
                     total_tests = tests
                 total_passed += passed
-                steps += 1
             # The DUT has rebooted at this point and is in a clean state.
         if total_tests == 0:
             raise error.TestFail('Error: Could not find any tests in module.')
@@ -373,50 +373,47 @@
         retry_inconsistency_error = None
         # If the results were not completed or were failing then continue or
         # retry them iteratively MAX_RETRY times.
-        while steps < self._max_retry and failed > 0:
-            # TODO(ihf): Use result_history to heuristically stop retries early.
-            if failed > waived:
-                with self._login_chrome():
-                    steps += 1
-                    self._ready_arc()
-                    logging.info('Retrying failures of %s with session_id %d:',
-                                 test_name, session_id)
-                    expected_tests = failed + notexecuted
-                    session_id, counts = self._tradefed_retry(test_name,
-                                                              session_id)
-                    tests, passed, failed, notexecuted, waived = counts
-                    self.result_history[steps] = counts
-                    # Consistency check, did we really run as many as we
-                    # thought initially?
-                    if expected_tests != tests:
-                        msg = ('Retry inconsistency - '
+        while steps < self._max_retry and failed > waived:
+            steps += 1
+            with self._login_chrome():
+                self._ready_arc()
+                logging.info('Retrying failures of %s with session_id %d:',
+                             test_name, session_id)
+                expected_tests = failed + notexecuted
+                session_id, counts = self._tradefed_retry(test_name,
+                                                          session_id)
+                tests, passed, failed, notexecuted, waived = counts
+                self.result_history[steps] = counts
+                # Consistency check, did we really run as many as we thought
+                # initially?
+                if expected_tests != tests:
+                    msg = ('Retry inconsistency - '
                            'initially saw %d failed+notexecuted, ran %d tests. '
                            'passed=%d, failed=%d, notexecuted=%d, waived=%d.' %
                            (expected_tests, tests, passed, failed, notexecuted,
                             waived))
-                        logging.warning(msg)
-                        if expected_tests > tests:
-                            # See b/36523200#comment8. Due to the existence of
-                            # the multiple tests having the same ID, more cases
-                            # may be run than previous fail count. As a
-                            # workaround, making it an error only when the tests
-                            # run were less than expected.
-                            # TODO(kinaba): Find a way to handle this dup.
-                            retry_inconsistency_error = msg
-                    if not self._consistent(tests, passed, failed, notexecuted):
-                        logging.warning('Tradefed inconsistency - retrying.')
-                        session_id, counts = self._tradefed_retry(test_name,
-                                                                  session_id)
-                        tests, passed, failed, notexecuted, waived = counts
-                        self.result_history[steps] = counts
-                    msg = 'retry(t=%d, p=%d, f=%d, ne=%d, w=%d)' % counts
-                    logging.info('RESULT: %s', msg)
-                    self.summary += ' ' + msg
-                    if not self._consistent(tests, passed, failed, notexecuted):
-                        logging.warning('Test count inconsistent. %s',
-                                        self.summary)
-                    total_passed += passed
-                # The DUT has rebooted at this point and is in a clean state.
+                    logging.warning(msg)
+                    if expected_tests > tests:
+                        # See b/36523200#comment8. Due to the existence of the
+                        # multiple tests having the same ID, more cases may be
+                        # run than previous fail count. As a workaround, making
+                        # it an error only when the tests run were less than
+                        # expected.
+                        # TODO(kinaba): Find a way to handle this dup.
+                        retry_inconsistency_error = msg
+                if not self._consistent(tests, passed, failed, notexecuted):
+                    logging.warning('Tradefed inconsistency - retrying.')
+                    session_id, counts = self._tradefed_retry(test_name,
+                                                              session_id)
+                    tests, passed, failed, notexecuted, waived = counts
+                    self.result_history[steps] = counts
+                msg = 'retry(t=%d, p=%d, f=%d, ne=%d, w=%d)' % counts
+                logging.info('RESULT: %s', msg)
+                self.summary += ' ' + msg
+                if not self._consistent(tests, passed, failed, notexecuted):
+                    logging.warning('Test count inconsistent. %s', self.summary)
+                total_passed += passed
+            # The DUT has rebooted at this point and is in a clean state.
 
         # Final classification of test results.
         if total_passed == 0 or failed > waived: