[autotest] Remove final vestiges of ARCHIVING

BUG=chromium:699275
TEST=None

Change-Id: I1555a7716f1e62e58b71f709afa4c95057282b6b
Reviewed-on: https://chromium-review.googlesource.com/544532
Commit-Ready: Allen Li <ayatane@chromium.org>
Tested-by: Allen Li <ayatane@chromium.org>
Reviewed-by: Prathmesh Prabhu <pprabhu@chromium.org>
diff --git a/client/common_lib/host_queue_entry_states.py b/client/common_lib/host_queue_entry_states.py
index 2ed22f0..c2091ad 100644
--- a/client/common_lib/host_queue_entry_states.py
+++ b/client/common_lib/host_queue_entry_states.py
@@ -9,7 +9,7 @@
 
 Status_list = ['Queued', 'Starting', 'Resetting', 'Verifying', 'Provisioning',
                'Pending', 'Waiting', 'Running', 'Gathering', 'Parsing',
-               'Archiving', 'Aborted', 'Completed', 'Failed', 'Stopped',
+               'Aborted', 'Completed', 'Failed', 'Stopped',
                'Cleaning', 'Template']
 
 Status = enum.Enum(*Status_list, string_values=True)
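
For context on why these statuses can be compared directly against database strings: autotest's custom enum helper, when given string_values=True, makes each attribute's value the status name itself. A minimal standalone sketch of that behavior (this approximates, and is not, the real client/common_lib/enum.py):

    # Minimal sketch of a string-valued enum in the spirit of
    # enum.Enum(*Status_list, string_values=True). Attribute lookup
    # yields the status name itself, so Status.PARSING == 'Parsing' and
    # values can be interpolated straight into SQL, as monitor_db.py
    # does below.
    class StringEnum(object):
        def __init__(self, *names):
            for name in names:
                # 'Queued' becomes the attribute QUEUED with value 'Queued'.
                setattr(self, name.upper().replace(' ', '_'), name)

    Status = StringEnum('Queued', 'Starting', 'Parsing', 'Aborted')
    assert Status.PARSING == 'Parsing'
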
diff --git a/scheduler/agent_task.py b/scheduler/agent_task.py
index 3ba6557..def5484 100644
--- a/scheduler/agent_task.py
+++ b/scheduler/agent_task.py
@@ -103,7 +103,6 @@
       |--->GatherLogsTask
       |--->SelfThrottledPostJobTask
             |--->FinalReparseTask
-            |--->ArchiveResultsTask
 
 """
 
diff --git a/scheduler/archive_results.control.srv b/scheduler/archive_results.control.srv
deleted file mode 100644
index 567e16a..0000000
--- a/scheduler/archive_results.control.srv
+++ /dev/null
@@ -1,3 +0,0 @@
-# TODO(ayatane): We don't rely on this for archiving (originally for
-# copying to cautotest).
-pass
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 1c0dfe7..8abaa16 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -510,8 +510,7 @@
         statuses = (models.HostQueueEntry.Status.STARTING,
                     models.HostQueueEntry.Status.RUNNING,
                     models.HostQueueEntry.Status.GATHERING,
-                    models.HostQueueEntry.Status.PARSING,
-                    models.HostQueueEntry.Status.ARCHIVING)
+                    models.HostQueueEntry.Status.PARSING)
         status_list = ','.join("'%s'" % status for status in statuses)
         queue_entries = scheduler_models.HostQueueEntry.fetch(
                 where='status IN (%s)' % status_list)
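
As a concrete check of what the hunk above now produces, here is the WHERE clause construction in isolation (statuses as plain strings, per the string-valued enum sketched earlier):

    # Each status is quoted and joined, and the result is interpolated
    # into an SQL IN (...) filter; ARCHIVING no longer appears.
    statuses = ('Starting', 'Running', 'Gathering', 'Parsing')
    status_list = ','.join("'%s'" % status for status in statuses)
    where = 'status IN (%s)' % status_list
    assert where == "status IN ('Starting','Running','Gathering','Parsing')"
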
@@ -566,8 +565,6 @@
             return postjob_task.GatherLogsTask(queue_entries=task_entries)
         if queue_entry.status == models.HostQueueEntry.Status.PARSING:
             return postjob_task.FinalReparseTask(queue_entries=task_entries)
-        if queue_entry.status == models.HostQueueEntry.Status.ARCHIVING:
-            return postjob_task.ArchiveResultsTask(queue_entries=task_entries)
 
         raise scheduler_lib.SchedulerError(
                 '_get_agent_task_for_queue_entry got entry with '
@@ -575,8 +572,7 @@
 
 
     def _check_for_duplicate_host_entries(self, task_entries):
-        non_host_statuses = (models.HostQueueEntry.Status.PARSING,
-                             models.HostQueueEntry.Status.ARCHIVING)
+        non_host_statuses = {models.HostQueueEntry.Status.PARSING}
         for task_entry in task_entries:
             using_host = (task_entry.host is not None
                           and task_entry.status not in non_host_statuses)
@@ -897,7 +893,7 @@
         calling the Agents tick().
 
         This method creates an agent for each HQE in one of (starting, running,
-        gathering, parsing, archiving) states, and adds it to the dispatcher so
+        gathering, parsing) states, and adds it to the dispatcher so
         it is handled by _handle_agents.
         """
         for agent_task in self._get_queue_entry_agent_tasks():
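
A condensed sketch of the scheduling pass that docstring describes, with ARCHIVING gone (the dispatcher, agent factory, and function names here are stand-ins, not the monitor_db internals):

    # One agent per HQE in an active status, handed to the dispatcher
    # so _handle_agents can tick it; Parsing is now the last post-job
    # state an entry can be picked up in.
    ACTIVE_STATUSES = frozenset(
            ['Starting', 'Running', 'Gathering', 'Parsing'])

    def schedule_running_entries(dispatcher, queue_entries, make_agent_task):
        for entry in queue_entries:
            if entry.status in ACTIVE_STATUSES:
                dispatcher.add_agent(make_agent_task(entry))
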
diff --git a/scheduler/postjob_task.py b/scheduler/postjob_task.py
index 9d60e81..ff25ac0 100644
--- a/scheduler/postjob_task.py
+++ b/scheduler/postjob_task.py
@@ -346,39 +346,3 @@
     def epilog(self):
         super(FinalReparseTask, self).epilog()
         self._set_all_statuses(self._final_status())
-
-
-class ArchiveResultsTask(SelfThrottledPostJobTask):
-    _ARCHIVING_FAILED_FILE = '.archiver_failed'
-
-    def __init__(self, queue_entries):
-        super(ArchiveResultsTask, self).__init__(queue_entries,
-                                                 log_file_name='.archiving.log')
-        # don't use _set_ids, since we don't want to set the host_ids
-        self.queue_entry_ids = [entry.id for entry in queue_entries]
-
-
-    def _pidfile_name(self):
-        return drone_manager.ARCHIVER_PID_FILE
-
-
-    def _generate_command(self, results_dir):
-        return ['true']
-
-
-    @classmethod
-    def _max_processes(cls):
-        return scheduler_config.config.max_transfer_processes
-
-
-    def prolog(self):
-        self._check_queue_entry_statuses(
-                self.queue_entries,
-                allowed_hqe_statuses=(models.HostQueueEntry.Status.ARCHIVING,))
-
-        super(ArchiveResultsTask, self).prolog()
-
-
-    def epilog(self):
-        super(ArchiveResultsTask, self).epilog()
-        self._set_all_statuses(self._final_status())
diff --git a/scheduler/scheduler_models.py b/scheduler/scheduler_models.py
index 12a9b09..7b4146e 100644
--- a/scheduler/scheduler_models.py
+++ b/scheduler/scheduler_models.py
@@ -868,7 +868,7 @@
         assert self.aborted and not self.complete
 
         Status = models.HostQueueEntry.Status
-        if self.status in (Status.GATHERING, Status.PARSING, Status.ARCHIVING):
+        if self.status in {Status.GATHERING, Status.PARSING}:
             # do nothing; post-job tasks will finish and then mark this entry
             # with status "Aborted" and take care of the host
             return
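
The abort branch above reduces to a two-element membership test; a condensed sketch of the logic it implements (statuses as plain strings; the abort callback is a hypothetical stand-in):

    # Entries in a post-job state are left for their tasks to finish
    # and mark Aborted; 'Archiving' is no longer one of those states.
    POSTJOB_STATUSES = frozenset(['Gathering', 'Parsing'])

    def on_abort(entry, abort_now):
        if entry.status in POSTJOB_STATUSES:
            return  # the running post-job task will clean up the entry/host
        abort_now(entry)
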
diff --git a/site_utils/collect_suite_time_stats.py b/site_utils/collect_suite_time_stats.py
index bc6c5f8..cdda7dc 100755
--- a/site_utils/collect_suite_time_stats.py
+++ b/site_utils/collect_suite_time_stats.py
@@ -40,7 +40,7 @@
 hqe_finished_time is stored in the "finished_on" of "afe_host_queue_entries"
 table.
 We do not use "job_finished_time" of "tko_jobs" as job_finished_time is
-recorded before gathering/parsing/archiving.
+recorded before gathering/parsing.
 We do not use hqe started time ("started_on" of "afe_host_queue_entries"),
 as it does not account for the lag from when a host is assigned to the job
 until the scheduler sees the assignment.
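
To make the timestamp choice concrete, a toy computation under the docstring's definitions (the datetimes are invented for illustration):

    import datetime

    # Measure to finished_on, which now covers gathering and parsing
    # with no archiving stage, rather than to tko_jobs.job_finished_time,
    # which is stamped before those post-job steps.
    queued_on = datetime.datetime(2017, 6, 1, 10, 0, 0)
    job_finished_time = datetime.datetime(2017, 6, 1, 10, 40, 0)
    finished_on = datetime.datetime(2017, 6, 1, 10, 45, 0)

    assert job_finished_time < finished_on  # stamped before parsing ends
    hqe_wall_time = (finished_on - queued_on).total_seconds()
    assert hqe_wall_time == 45 * 60
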