[autotest] Merge SiteDispatcher into Dispatcher
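
Fold the SiteDispatcher overrides from scheduler/site_monitor_db.py
into Dispatcher in scheduler/monitor_db.py and drop the
BaseDispatcher/SiteDispatcher split: _reverify_hosts_where now
schedules a Reset special task with an explicit requested_by user,
and _check_for_unrecovered_verifying_entries requeues unrecovered
Resetting entries instead of raising SchedulerError.

For reference, a sketch of the layering removed here (taken from the
monitor_db.py lines deleted below):

    class BaseDispatcher(object):
        # core dispatcher logic lived here
        ...

    SiteDispatcher = utils.import_site_class(
        __file__, 'autotest_lib.scheduler.site_monitor_db',
        'SiteDispatcher', BaseDispatcher)

    class Dispatcher(SiteDispatcher):
        pass

After this change there is a single Dispatcher(object) that carries
the site behavior directly.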

BUG=chromium:672727
TEST=None

Change-Id: I6de8572e6fafe2ecc5800c9ca1ac6277efd60e34
Reviewed-on: https://chromium-review.googlesource.com/438827
Commit-Ready: Allen Li <ayatane@chromium.org>
Tested-by: Allen Li <ayatane@chromium.org>
Reviewed-by: Prathmesh Prabhu <pprabhu@chromium.org>
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index ea49fb5..3016775 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -271,7 +271,7 @@
     return command
 
 def _calls_log_tick_msg(func):
-    """Used to trace functions called by BaseDispatcher.tick."""
+    """Used to trace functions called by Dispatcher.tick."""
     @functools.wraps(func)
     def wrapper(self, *args, **kwargs):
         self._log_tick_msg('Starting %s' % func.__name__)
@@ -280,7 +280,7 @@
     return wrapper
 
 
-class BaseDispatcher(object):
+class Dispatcher(object):
 
 
     def __init__(self):
@@ -685,23 +685,26 @@
 
 
     def _check_for_unrecovered_verifying_entries(self):
+        # Verify has been replaced by Reset, so look for Resetting entries.
         queue_entries = scheduler_models.HostQueueEntry.fetch(
-                where='status = "%s"' % models.HostQueueEntry.Status.VERIFYING)
-        unrecovered_hqes = []
+                where='status = "%s"' % models.HostQueueEntry.Status.RESETTING)
         for queue_entry in queue_entries:
             special_tasks = models.SpecialTask.objects.filter(
                     task__in=(models.SpecialTask.Task.CLEANUP,
-                              models.SpecialTask.Task.VERIFY),
+                              models.SpecialTask.Task.VERIFY,
+                              models.SpecialTask.Task.RESET),
                     queue_entry__id=queue_entry.id,
                     is_complete=False)
             if special_tasks.count() == 0:
-                unrecovered_hqes.append(queue_entry)
-
-        if unrecovered_hqes:
-            message = '\n'.join(str(hqe) for hqe in unrecovered_hqes)
-            raise scheduler_lib.SchedulerError(
-                    '%d unrecovered verifying host queue entries:\n%s' %
-                    (len(unrecovered_hqes), message))
+                logging.error('Unrecovered Resetting host queue entry: %s. '
+                              'Setting status to Queued.', str(queue_entry))
+                # This host queue entry was set to Resetting, but no special
+                # task exists for it. This happens if the scheduler dies
+                # between changing the status and creating the special task.
+                # Setting the entry back to Queued lets the job restart from
+                # the beginning and proceed correctly, which is preferable to
+                # monitor_db failing to launch.
+                queue_entry.set_status('Queued')
 
 
     @_calls_log_tick_msg
@@ -737,6 +740,9 @@
                 print_message=message)
 
 
+    DEFAULT_REQUESTED_BY_USER_ID = 1
+
+
     def _reverify_hosts_where(self, where,
                               print_message='Reverifying host %s'):
         full_where='locked = 0 AND invalid = 0 AND ' + where
@@ -745,13 +751,19 @@
                 # host has already been recovered in some way
                 continue
             if self._host_has_scheduled_special_task(host):
-                # host will have a special task scheduled on the next tick
+                # host will have a special task scheduled on the next cycle
                 continue
             if print_message:
-                logging.info(print_message, host.hostname)
+                logging.error(print_message, host.hostname)
+            try:
+                user = models.User.objects.get(login='autotest_system')
+            except models.User.DoesNotExist:
+                user = models.User.objects.get(
+                        id=self.DEFAULT_REQUESTED_BY_USER_ID)
             models.SpecialTask.objects.create(
-                    task=models.SpecialTask.Task.CLEANUP,
-                    host=models.Host.objects.get(id=host.id))
+                    task=models.SpecialTask.Task.RESET,
+                    host=models.Host.objects.get(id=host.id),
+                    requested_by=user)
 
 
     def _recover_hosts(self):
@@ -1079,14 +1091,6 @@
             logging.debug(msg)
 
 
-SiteDispatcher = utils.import_site_class(
-    __file__, 'autotest_lib.scheduler.site_monitor_db',
-    'SiteDispatcher', BaseDispatcher)
-
-class Dispatcher(SiteDispatcher):
-    pass
-
-
 class Agent(object):
     """
     An agent for use by the Dispatcher class to perform a task.  An agent wraps
diff --git a/scheduler/monitor_db_unittest.py b/scheduler/monitor_db_unittest.py
index a4ae33e..d2982d6 100755
--- a/scheduler/monitor_db_unittest.py
+++ b/scheduler/monitor_db_unittest.py
@@ -113,7 +113,7 @@
         self.god.stub_with(monitor_db, '_db_manager', connection_manager)
         self.god.stub_with(monitor_db, '_db', self._database)
 
-        self.god.stub_with(monitor_db.BaseDispatcher,
+        self.god.stub_with(monitor_db.Dispatcher,
                            '_get_pending_queue_entries',
                            self._get_pending_hqes)
         self.god.stub_with(scheduler_models, '_db', self._database)
diff --git a/scheduler/scheduler_models_unittest.py b/scheduler/scheduler_models_unittest.py
index 83e4d1c..22a278e 100755
--- a/scheduler/scheduler_models_unittest.py
+++ b/scheduler/scheduler_models_unittest.py
@@ -264,8 +264,8 @@
         hqe.status = models.HostQueueEntry.Status.STARTING
         hqe.started_on = datetime.datetime.now()
 
-        dispatcher = self.god.create_mock_class(monitor_db.BaseDispatcher,
-                                                'BaseDispatcher')
+        dispatcher = self.god.create_mock_class(monitor_db.Dispatcher,
+                                                'Dispatcher')
         agent = self.god.create_mock_class(monitor_db.Agent, 'Agent')
         dispatcher.get_agents_for_entry.expect_call(hqe).and_return([agent])
         agent.is_done.expect_call().and_return(agent_finished)
diff --git a/scheduler/site_monitor_db.py b/scheduler/site_monitor_db.py
index f607365..f008116 100644
--- a/scheduler/site_monitor_db.py
+++ b/scheduler/site_monitor_db.py
@@ -85,62 +85,3 @@
                 email_manager.manager.enqueue_notify_email(
                     'Job Aborted - Invalid Host Status', error_message)
                 entry.job.request_abort()
-
-
-class SiteDispatcher(object):
-    """
-    SiteDispatcher subclasses BaseDispatcher in monitor_db.
-    """
-    DEFAULT_REQUESTED_BY_USER_ID = 1
-
-
-    def _reverify_hosts_where(self, where,
-                              print_message='Reverifying host %s'):
-        """
-        This is an altered version of _reverify_hosts_where the class to
-        models.SpecialTask.objects.create passes in an argument for
-        requested_by, in order to allow the Reset task to be created
-        properly.
-        """
-        full_where='locked = 0 AND invalid = 0 AND ' + where
-        for host in scheduler_models.Host.fetch(where=full_where):
-            if self.host_has_agent(host):
-                # host has already been recovered in some way
-                continue
-            if self._host_has_scheduled_special_task(host):
-                # host will have a special task scheduled on the next cycle
-                continue
-            if print_message:
-                logging.error(print_message, host.hostname)
-            try:
-                user = models.User.objects.get(login='autotest_system')
-            except models.User.DoesNotExist:
-                user = models.User.objects.get(
-                        id=SiteDispatcher.DEFAULT_REQUESTED_BY_USER_ID)
-            models.SpecialTask.objects.create(
-                    task=models.SpecialTask.Task.RESET,
-                    host=models.Host.objects.get(id=host.id),
-                    requested_by=user)
-
-
-    def _check_for_unrecovered_verifying_entries(self):
-        # Verify is replaced by Reset.
-        queue_entries = scheduler_models.HostQueueEntry.fetch(
-                where='status = "%s"' % models.HostQueueEntry.Status.RESETTING)
-        for queue_entry in queue_entries:
-            special_tasks = models.SpecialTask.objects.filter(
-                    task__in=(models.SpecialTask.Task.CLEANUP,
-                              models.SpecialTask.Task.VERIFY,
-                              models.SpecialTask.Task.RESET),
-                    queue_entry__id=queue_entry.id,
-                    is_complete=False)
-            if special_tasks.count() == 0:
-                logging.error('Unrecovered Resetting host queue entry: %s. '
-                              'Setting status to Queued.', str(queue_entry))
-                # Essentially this host queue entry was set to be Verifying
-                # however no special task exists for entry. This occurs if the
-                # scheduler dies between changing the status and creating the
-                # special task. By setting it to queued, the job can restart
-                # from the beginning and proceed correctly. This is much more
-                # preferable than having monitor_db not launching.
-                queue_entry.set_status('Queued')