Merge branch 'aosp/mirror-chromium-master' into update_utils am: 4307f4735e am: 18caef1edd am: 6c551e0d52 am: ddfea1f7e7
am: f1a93c225b

Change-Id: I60d258ddb8fd0bf9632f1f70c5cdeb3078938059
diff --git a/afe_lock_machine.py b/afe_lock_machine.py
index 125ac97..f83e897 100755
--- a/afe_lock_machine.py
+++ b/afe_lock_machine.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2015 Google INc.  All Rights Reserved.
 """This module controls locking and unlocking of test machines."""
@@ -65,7 +65,7 @@
   changing the lock status of machines on either server.  For the ChromeOS
   HW Lab, it only allows access to the toolchain team lab machines, as
   defined in toolchain-utils/crosperf/default_remotes.  By default it will
-  look for a local server on chrotomation2.mtv.corp.google.com, but an
+  look for a local server on chrotomation2.svl.corp.google.com, but an
   alternative local AFE server can be supplied, if desired.
 
   !!!IMPORTANT NOTE!!!  The AFE server can only be called from the main
@@ -74,7 +74,7 @@
   in the Python virtual machine (and signal handling) and cannot be changed.
   """
 
-  LOCAL_SERVER = 'chrotomation2.mtv.corp.google.com'
+  LOCAL_SERVER = 'chrotomation2.svl.corp.google.com'
 
   def __init__(self,
                remotes,
@@ -97,6 +97,7 @@
         machines that are not in the ChromeOS HW lab.
       local: A Boolean indicating whether or not to use/allow a local AFE
         server to be used (see local_server argument).
+      use_local: Use the local server instead of the official one.
       log: If not None, this is the logger object to be used for writing out
         informational output messages.  It is expected to be an instance of
         Logger class from cros_utils/logger.py.
@@ -272,9 +273,9 @@
     for m in self.machines:
       for cros_name in [m, m + '.cros']:
         if cros_name in self.toolchain_lab_machines:
-          raise UpdateNonLocalMachine('Machine %s is already in the ChromeOS HW'
-                                      'Lab.  Cannot add it to local server.' %
-                                      cros_name)
+          raise UpdateNonLocalMachine(
+              'Machine %s is already in the ChromeOS HW '
+              'Lab.  Cannot add it to local server.' % cros_name)
       host_info = self.local_afe.get_hosts(hostname=m)
       if host_info:
         raise DuplicateAdd('Machine %s is already on the local server.' % m)
@@ -380,9 +381,10 @@
       afe_server = self.local_afe
 
     try:
-      afe_server.run('modify_hosts',
-                     host_filter_data={'hostname__in': [m]},
-                     update_data=kwargs)
+      afe_server.run(
+          'modify_hosts',
+          host_filter_data={'hostname__in': [m]},
+          update_data=kwargs)
     except Exception as e:
       traceback.print_exc()
       raise LockingError('Unable to %s machine %s. %s' % (action, m, str(e)))
@@ -426,8 +428,9 @@
       if machine.find('.cros') == -1:
         cros_machine = cros_machine + '.cros'
 
-    self.machines = [m for m in self.machines
-                     if m != cros_machine and m != machine]
+    self.machines = [
+        m for m in self.machines if m != cros_machine and m != machine
+    ]
 
   def CheckMachineLocks(self, machine_states, cmd):
     """Check that every machine in requested list is in the proper state.
@@ -456,8 +459,8 @@
                             'else (%s).' % (k, state['locked_by']))
       elif cmd == 'lock':
         if state['locked']:
-          self.logger.LogWarning('Attempt to lock already locked machine (%s)' %
-                                 k)
+          self.logger.LogWarning(
+              'Attempt to lock already locked machine (%s)' % k)
           self._InternalRemoveMachine(k)
 
   def HasAFEServer(self, local):
diff --git a/android_bench_suite/Binder_flags_aosp.diff b/android_bench_suite/Binder_flags_aosp.diff
new file mode 100644
index 0000000..1e3ec6c
--- /dev/null
+++ b/android_bench_suite/Binder_flags_aosp.diff
@@ -0,0 +1,43 @@
+diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
+index f7347aef1..a539fac47 100644
+--- a/libs/binder/Android.bp
++++ b/libs/binder/Android.bp
+@@ -52,10 +52,12 @@ cc_library {
+         "-Wall",
+         "-Wextra",
+         "-Werror",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+     product_variables: {
+         binder32bit: {
+-            cflags: ["-DBINDER_IPC_32BIT=1"],
++            cflags: ["-DBINDER_IPC_32BIT=1",],
+         },
+     },
+ 
+@@ -76,4 +78,22 @@ cc_library {
+     },
+ }
+ 
+-subdirs = ["tests"]
++cc_test {
++    name: "binderThroughputTest",
++    srcs: ["tests/binderThroughputTest.cpp"],
++    shared_libs: [
++        "libbinder",
++        "libutils",
++    ],
++    clang: true,
++    cflags: [
++        "-g",
++        "-Wall",
++        "-Werror",
++        "-Wno-missing-field-initializers",
++        "-Wno-sign-compare",
++	 "-O3",
++        CFLAGS_FOR_BENCH_SUITE
++    ],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++}
++
diff --git a/android_bench_suite/Binder_flags_internal.diff b/android_bench_suite/Binder_flags_internal.diff
new file mode 100644
index 0000000..1e3ec6c
--- /dev/null
+++ b/android_bench_suite/Binder_flags_internal.diff
@@ -0,0 +1,43 @@
+diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
+index f7347aef1..a539fac47 100644
+--- a/libs/binder/Android.bp
++++ b/libs/binder/Android.bp
+@@ -52,10 +52,12 @@ cc_library {
+         "-Wall",
+         "-Wextra",
+         "-Werror",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+     product_variables: {
+         binder32bit: {
+-            cflags: ["-DBINDER_IPC_32BIT=1"],
++            cflags: ["-DBINDER_IPC_32BIT=1",],
+         },
+     },
+ 
+@@ -76,4 +78,22 @@ cc_library {
+     },
+ }
+ 
+-subdirs = ["tests"]
++cc_test {
++    name: "binderThroughputTest",
++    srcs: ["tests/binderThroughputTest.cpp"],
++    shared_libs: [
++        "libbinder",
++        "libutils",
++    ],
++    clang: true,
++    cflags: [
++        "-g",
++        "-Wall",
++        "-Werror",
++        "-Wno-missing-field-initializers",
++        "-Wno-sign-compare",
++	 "-O3",
++        CFLAGS_FOR_BENCH_SUITE
++    ],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++}
++
diff --git a/android_bench_suite/Dex2oat_flags_aosp.diff b/android_bench_suite/Dex2oat_flags_aosp.diff
new file mode 100644
index 0000000..fcd611f
--- /dev/null
+++ b/android_bench_suite/Dex2oat_flags_aosp.diff
@@ -0,0 +1,13 @@
+diff --git a/compiler/Android.bp b/compiler/Android.bp
+index a1269dcaf..a9b62b474 100644
+--- a/compiler/Android.bp
++++ b/compiler/Android.bp
+@@ -215,6 +215,8 @@ art_cc_defaults {
+     shared: {
+         shared_libs: ["libcrypto"],
+     },
++    cflags: [CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+ }
+ 
+ gensrcs {
diff --git a/android_bench_suite/Dex2oat_flags_internal.diff b/android_bench_suite/Dex2oat_flags_internal.diff
new file mode 100644
index 0000000..fcd611f
--- /dev/null
+++ b/android_bench_suite/Dex2oat_flags_internal.diff
@@ -0,0 +1,13 @@
+diff --git a/compiler/Android.bp b/compiler/Android.bp
+index a1269dcaf..a9b62b474 100644
+--- a/compiler/Android.bp
++++ b/compiler/Android.bp
+@@ -215,6 +215,8 @@ art_cc_defaults {
+     shared: {
+         shared_libs: ["libcrypto"],
+     },
++    cflags: [CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+ }
+ 
+ gensrcs {
diff --git a/android_bench_suite/Hwui_flags_aosp.diff b/android_bench_suite/Hwui_flags_aosp.diff
new file mode 100644
index 0000000..9e3b1df
--- /dev/null
+++ b/android_bench_suite/Hwui_flags_aosp.diff
@@ -0,0 +1,50 @@
+diff --git a/libs/hwui/Android.bp b/libs/hwui/Android.bp
+index 558cdc0faf3..1565be5b201 100644
+--- a/libs/hwui/Android.bp
++++ b/libs/hwui/Android.bp
+@@ -24,12 +24,15 @@ cc_defaults {
+         "-Werror",
+         "-fvisibility=hidden",
+         "-DHWUI_NEW_OPS",
++	CFLAGS_FOR_BENCH_SUITE
+ 
+         // GCC false-positives on this warning, and since we -Werror that's
+         // a problem
+         "-Wno-free-nonheap-object",
+     ],
+ 
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     include_dirs: [
+         "external/skia/include/private",
+         "external/skia/src/core",
+@@ -214,6 +217,9 @@ cc_defaults {
+         export_proto_headers: true,
+     },
+ 
++    cflags: [CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     export_include_dirs: ["."],
+ }
+ 
+@@ -229,7 +235,8 @@ cc_library {
+ cc_library_static {
+     name: "libhwui_static_null_gpu",
+     defaults: ["libhwui_defaults"],
+-    cflags: ["-DHWUI_NULL_GPU"],
++    cflags: ["-DHWUI_NULL_GPU", CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+     srcs: [
+         "debug/nullegl.cpp",
+         "debug/nullgles.cpp",
+@@ -319,7 +326,8 @@ cc_benchmark {
+     name: "hwuimicro",
+     defaults: ["hwui_test_defaults"],
+ 
+-    cflags: ["-DHWUI_NULL_GPU"],
++    cflags: ["-DHWUI_NULL_GPU", CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+ 
+     whole_static_libs: ["libhwui_static_null_gpu"],
+
diff --git a/android_bench_suite/Hwui_flags_internal.diff b/android_bench_suite/Hwui_flags_internal.diff
new file mode 100644
index 0000000..16a0222
--- /dev/null
+++ b/android_bench_suite/Hwui_flags_internal.diff
@@ -0,0 +1,72 @@
+diff --git a/libs/hwui/Android.bp b/libs/hwui/Android.bp
+index 303d05f084a..946aa9bb754 100644
+--- a/libs/hwui/Android.bp
++++ b/libs/hwui/Android.bp
+@@ -17,6 +17,7 @@ cc_defaults {
+         "-Wunreachable-code",
+         "-Werror",
+         "-fvisibility=hidden",
++	CFLAGS_FOR_BENCH_SUITE
+ 
+         // GCC false-positives on this warning, and since we -Werror that's
+         // a problem
+@@ -30,6 +31,8 @@ cc_defaults {
+         //"-DANDROID_ENABLE_LINEAR_BLENDING",
+     ],
+ 
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     include_dirs: [
+         "external/skia/include/private",
+         "external/skia/src/core",
+@@ -231,6 +234,9 @@ cc_defaults {
+         export_proto_headers: true,
+     },
+ 
++    cflags: [CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     export_include_dirs: ["."],
+     export_shared_lib_headers: ["libRScpp"],
+ }
+@@ -240,7 +246,7 @@ cc_library {
+     defaults: [
+         "libhwui_defaults",
+ 
+-        // Enables fine-grained GLES error checking
++	// Enables fine-grained GLES error checking
+         // If enabled, every GLES call is wrapped & error checked
+         // Has moderate overhead
+         "hwui_enable_opengl_validation",
+@@ -257,7 +263,8 @@ cc_library_static {
+         "libhwui_defaults",
+         "hwui_debug",
+     ],
+-    cflags: ["-DHWUI_NULL_GPU"],
++    cflags: ["-DHWUI_NULL_GPU", CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+     srcs: [
+         "debug/nullegl.cpp",
+     ],
+@@ -354,6 +361,9 @@ cc_benchmark {
+     whole_static_libs: ["libhwui"],
+     shared_libs: ["libmemunreachable"],
+ 
++    cflags: [CFLAGS_FOR_BENCH_SUITE],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     srcs: [
+         "tests/macrobench/TestSceneRunner.cpp",
+         "tests/macrobench/main.cpp",
+@@ -371,8 +381,11 @@ cc_benchmark {
+     cflags: [
+         "-include debug/wrap_gles.h",
+         "-DHWUI_NULL_GPU",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
+ 
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     whole_static_libs: ["libhwui_static_debug"],
+     shared_libs: ["libmemunreachable"],
+ 
diff --git a/android_bench_suite/README.txt b/android_bench_suite/README.txt
new file mode 100644
index 0000000..3d0cceb
--- /dev/null
+++ b/android_bench_suite/README.txt
@@ -0,0 +1,41 @@
+This is an Android toolchain benchmark suite.
+===========================================
+Where to find this suite:
+	This suite lives in google3. Please create a google3 branch first,
+	then run:
+		$ cd experimental/users/zhizhouy/benchtoolchain
+	Copy this directory to wherever you want to keep it.
+
+To use this suite:
+	1. Configure the basic environment in the env_setting file.
+
+	2. Run ./apply_patches.py, which will:
+		1) Patch all the Android benchmarks in the Android tree.
+		The Panorama benchmark does not exist in the Android tree, so
+		perftests/ gets copied into the top level of the Android tree.
+
+		2) Apply the patch autotest.diff to android_root/external/autotest,
+		which includes all the test scripts for the benchmarks, and copy the
+		test cases into the related autotest directories.
+
+	   If you have applied the patches only partially and want to discard
+	   them, just run discard_patches.py.
+
+	3. Build and run a benchmark on the device using ./run.py. You can
+	either use a test configuration file (-t test_config) or set all the
+	variables manually. See the example workflow at the end of this file.
+
+	4. The raw results are written to bench_result_* in the bench suite
+	home directory.
+
+	5. A JSON-format result will be generated for the crosperf report.
+
+Utility tools:
+	1. Autotest is a test framework located in Android external/autotest.
+		Before running it for the first time, please run
+		utils/build_externals.py to ensure all the environments
+		and tools needed are installed.
+
+	2. Crosperf is a report-generating tool in the ChromeOS toolchain
+		utilities. Look for it in the Chromium source tree:
+		src/third_party/toolchain-utils/crosperf.
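+
+Example workflow (illustrative only; adjust paths and the test config to
+your own setup, as described in the steps above):
+	$ ./apply_patches.py
+	$ ./run.py -t test_config
+	$ ls bench_result_*
+	$ ./discard_patches.py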
diff --git a/android_bench_suite/Skia_flags_aosp.diff b/android_bench_suite/Skia_flags_aosp.diff
new file mode 100644
index 0000000..b2ff242
--- /dev/null
+++ b/android_bench_suite/Skia_flags_aosp.diff
@@ -0,0 +1,28 @@
+diff --git a/Android.bp b/Android.bp
+index a581b0a53..36159c5ae 100644
+--- a/Android.bp
++++ b/Android.bp
+@@ -40,8 +40,11 @@ cc_library {
+         "-Wno-clobbered",
+         "-Wno-error",
+         "-fexceptions",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
+ 
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     cppflags: [
+         "-std=c++11",
+         "-fno-threadsafe-statics",
+@@ -1470,8 +1473,11 @@ cc_test {
+         "-U_FORTIFY_SOURCE",
+         "-D_FORTIFY_SOURCE=1",
+         "-DSKIA_IMPLEMENTATION=1",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
+ 
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     cppflags: [
+         "-std=c++11",
+         "-fno-threadsafe-statics",
diff --git a/android_bench_suite/Skia_flags_internal.diff b/android_bench_suite/Skia_flags_internal.diff
new file mode 100644
index 0000000..2eb6a1d
--- /dev/null
+++ b/android_bench_suite/Skia_flags_internal.diff
@@ -0,0 +1,26 @@
+diff --git a/Android.bp b/Android.bp
+index b4e1f5f701..13e1c6645f 100644
+--- a/Android.bp
++++ b/Android.bp
+@@ -9,7 +9,9 @@ cc_library {
+         "-D_FORTIFY_SOURCE=1",
+         "-DSKIA_IMPLEMENTATION=1",
+         "-DATRACE_TAG=ATRACE_TAG_VIEW",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
+ 
+     export_include_dirs: [
+         "include/android/",
+@@ -1603,8 +1605,11 @@ cc_test {
+ 
+     cflags: [
+         "-Wno-unused-parameter",
++	CFLAGS_FOR_BENCH_SUITE
+     ],
+ 
++    ldflags: [LDFLAGS_FOR_BENCH_SUITE],
++
+     local_include_dirs: [
+         "bench/",
+         "experimental/svg/model/",
diff --git a/android_bench_suite/apply_patches.py b/android_bench_suite/apply_patches.py
new file mode 100755
index 0000000..fbe1a8d
--- /dev/null
+++ b/android_bench_suite/apply_patches.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Script to patch Android repo with diffs that are needed by the suite.
+
+Run this script before running the suite.
+"""
+from __future__ import print_function
+
+import config
+import os
+import subprocess
+
+# The patches to be applied to the Android repo.
+# Applying a patch may fail if it has already been applied or hits some
+# other error.
+# FIXME: handle these cases properly in the future.
+def try_patch_skia():
+  skia_dir = os.path.join(config.android_home, config.bench_dict['Skia'])
+  # The patch file to use depends on whether the repo is aosp or internal.
+  if config.android_type == 'internal':
+    print('No need to patch skia for internal repo.')
+    return
+  elif config.android_type == 'aosp':
+    skia_patch = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), 'skia_aosp.diff')
+  else:
+    raise ValueError('Android source type should be either aosp or internal.')
+  # FIXME: A quick hack; in the future, handle errors properly and check
+  # whether the patch has already been applied.
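+  # A possible refinement (sketch only, not wired up here): probe first with
+  #   git -C <skia_dir> apply --check <skia_patch>            (would it apply?)
+  #   git -C <skia_dir> apply --reverse --check <skia_patch>  (already applied?)
+  # and only attempt the real 'git apply' when the first check succeeds.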
+  try:
+    subprocess.check_call(['git', '-C', skia_dir, 'apply', skia_patch])
+    print('Skia patched successfully!')
+  except subprocess.CalledProcessError:
+    print('Skia patch not applied, error or already patched.')
+
+
+def try_patch_autotest():
+  # Patch autotest, which includes all the testcases on device, setting device,
+  # and running the benchmarks
+  autotest_dir = os.path.join(config.android_home, config.autotest_dir)
+  autotest_patch = os.path.join(
+      os.path.dirname(os.path.realpath(__file__)), 'autotest.diff')
+  dex2oat_dir = os.path.join(autotest_dir, 'server/site_tests/android_Dex2oat')
+  panorama_dir = os.path.join(autotest_dir,
+                              'server/site_tests/android_Panorama')
+  # FIXME: A quick hack; in the future, handle errors properly and check
+  # whether the patch has already been applied.
+  try:
+    subprocess.check_call(['git', '-C', autotest_dir, 'apply', autotest_patch])
+    subprocess.check_call(['cp', '-rf', 'dex2oat_input', dex2oat_dir])
+    subprocess.check_call(['cp', '-rf', 'panorama_input', panorama_dir])
+    print('Autotest patched successfully!')
+  except subprocess.CalledProcessError:
+    print('Autotest patch not applied, error or already patched.')
+
+
+def try_patch_panorama():
+  panorama_dir = os.path.join(config.android_home,
+                              config.bench_dict['Panorama'])
+  panorama_patch = os.path.join(
+      os.path.dirname(os.path.realpath(__file__)), 'panorama.diff')
+  # FIXME: A quick hack; in the future, handle errors properly and check
+  # whether the patch has already been applied.
+  try:
+    subprocess.check_call(['git', '-C', panorama_dir, 'apply', panorama_patch])
+    print('Panorama patched successfully!')
+  except subprocess.CalledProcessError:
+    print('Panorama patch not applied, error or already patched.')
+
+
+def try_patch_synthmark():
+  synthmark_dir = 'devrel/tools/synthmark'
+  # FIXME: A quick hack; in the future, handle errors properly and check
+  # whether the patch has already been applied.
+  try:
+    subprocess.check_call([
+        'bash', '-c', 'mkdir devrel && '
+        'cd devrel && '
+        'repo init -u sso://devrel/manifest && '
+        'repo sync tools/synthmark'
+    ])
+    synthmark_patch = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), 'synthmark.diff')
+    subprocess.check_call(['git', '-C', synthmark_dir,
+                           'apply', synthmark_patch])
+
+    subprocess.check_call(['mv', '-f', synthmark_dir, config.android_home])
+    subprocess.check_call(['rm', '-rf', 'devrel'])
+    print('Synthmark patched successfully!')
+  except subprocess.CalledProcessError:
+    print('Synthmark patch not applied, error or already patched.')
+
+
+def main():
+  try_patch_skia()
+  try_patch_autotest()
+  try_patch_panorama()
+  try_patch_synthmark()
+
+
+if __name__ == '__main__':
+  main()
diff --git a/android_bench_suite/autotest.diff b/android_bench_suite/autotest.diff
new file mode 100644
index 0000000..ef0029a
--- /dev/null
+++ b/android_bench_suite/autotest.diff
@@ -0,0 +1,1057 @@
+diff --git a/server/site_tests/android_Binder/android_Binder.py b/server/site_tests/android_Binder/android_Binder.py
+new file mode 100644
+index 000000000..b233b586a
+--- /dev/null
++++ b/server/site_tests/android_Binder/android_Binder.py
+@@ -0,0 +1,57 @@
++# Tests for android Binder
++from __future__ import print_function
++
++import bench_config
++import logging
++import os
++import re
++
++from autotest_lib.server import test
++
++class android_Binder(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        out_dir = os.path.join(bench_config.android_home,
++                              'out/target/product/' + bench_config.product)
++
++        # Set binary directories
++        lib_dir = os.path.join(out_dir, 'system/lib/libbinder.so')
++        lib_dir_DUT = '/system/lib/libbinder.so'
++        lib64_dir = os.path.join(out_dir, 'system/lib64/libbinder.so')
++        lib64_dir_DUT = '/system/lib64/libbinder.so'
++        bench_dir = os.path.join(out_dir,
++                                 'symbols/data/nativetest64',
++                                 'binderThroughputTest/binderThroughputTest')
++        bench_dir_DUT = os.path.join('/data/local/tmp',
++                                     'binderThroughputTest')
++
++        # Push binary to the device
++        print('Pushing binaries of Binder benchmark onto device!')
++        host.send_file(bench_dir, bench_dir_DUT, delete_dest=True)
++        host.send_file(lib_dir, lib_dir_DUT, delete_dest=True)
++        host.send_file(lib64_dir, lib64_dir_DUT, delete_dest=True)
++
++        # Make sure the binary is executable
++        self.client.run('chmod u+x ' + bench_dir_DUT)
++
++        print('Running tests on the device...')
++        # First run creates bench_result
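++        # (For example, with TEST_MODE=7 this expands to:
++        #  taskset 7 /data/local/tmp/binderThroughputTest
++        #      > /data/local/tmp/bench_result)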
++        self.client.run('taskset %s /data/local/tmp/'
++                        'binderThroughputTest > /data/local/tmp/bench_result'
++                        % os.getenv('TEST_MODE'))
++        # Next 4 runs add to bench_result
++        for i in xrange(4):
++          self.client.run('taskset %s /data/local/tmp/'
++                          'binderThroughputTest >> '
++                          '/data/local/tmp/bench_result'
++                          % os.getenv('TEST_MODE'))
++
++        # Pull result from the device
++        out_dir = bench_config.bench_suite_dir
++        result_dir_DUT = '/data/local/tmp/bench_result'
++
++        host.get_file(result_dir_DUT, out_dir, delete_dest=True)
++        print('Result has been pulled back to file bench_result!')
+diff --git a/server/site_tests/android_Binder/bench_config.py b/server/site_tests/android_Binder/bench_config.py
+new file mode 100644
+index 000000000..20f685eb9
+--- /dev/null
++++ b/server/site_tests/android_Binder/bench_config.py
+@@ -0,0 +1,19 @@
++#!/usr/bin/env python2
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++                         default=os.path.join(home,
++                                 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++                            default=os.path.join(android_home,
++                                                 'benchtoolchain'))
++
++synthmark_dir = 'framework/native/libs/binder'
++
++real_synthmark_dir = os.path.join(android_home, synthmark_dir)
++
++out_dir = os.path.join(android_home, 'out')
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Binder/control b/server/site_tests/android_Binder/control
+new file mode 100644
+index 000000000..d91854b11
+--- /dev/null
++++ b/server/site_tests/android_Binder/control
+@@ -0,0 +1,19 @@
++#Control
++
++NAME = "Binder"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "application"
++TEST_TYPE = "server"
++
++DOC = """
++
++"""
++
++def run_binder_test(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_Binder", host=host)
++
++parallel_simple(run_binder_test, machines)
+diff --git a/server/site_tests/android_Dex2oat/android_Dex2oat.py b/server/site_tests/android_Dex2oat/android_Dex2oat.py
+new file mode 100644
+index 000000000..dd6af0b53
+--- /dev/null
++++ b/server/site_tests/android_Dex2oat/android_Dex2oat.py
+@@ -0,0 +1,70 @@
++# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
++# Use of this source code is governed by a BSD-style license that can be
++# found in the LICENSE file.
++
++import bench_config
++import time
++import logging
++import os
++import re
++
++from autotest_lib.client.common_lib import error
++from autotest_lib.server import test
++
++class android_Dex2oat(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        out_dir = os.path.join(bench_config.android_home,
++                              'out/target/product/',
++                               bench_config.product)
++
++        # Set binary directories
++        bench_dir = os.path.join(out_dir, 'system/lib/libart-compiler.so')
++        bench_dir_DUT = '/system/lib/libart-compiler.so'
++        bench64_dir = os.path.join(out_dir, 'system/lib64/libart-compiler.so')
++        bench64_dir_DUT = '/system/lib64/libart-compiler.so'
++
++        # Push libart-compiler.so to the device
++        print('Pushing binaries of newly generated library onto device!')
++        host.send_file(bench_dir, bench_dir_DUT, delete_dest=True)
++        host.send_file(bench64_dir, bench64_dir_DUT, delete_dest=True)
++
++        # Set testcase directories
++        test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
++                                 'dex2oat_input')
++        test_dir_DUT = '/data/local/tmp/'
++
++        # Push testcases to the device
++        print('Pushing tests onto device!')
++        host.send_file(test_dir, test_dir_DUT, delete_dest=True)
++
++        # Open file to write the result
++        with open(os.path.join(bench_config.bench_suite_dir,
++                               'bench_result'), 'w') as f:
++
++          # There are two benchmarks, chrome and camera.
++          for i in xrange(2):
++            f.write('Test %d:\n' % i)
++            total_time = 0
++            # Run the benchmark several times for accuracy
++            for j in xrange(3):
++              f.write('Iteration %d: ' % j)
++              result = self.client.run('time taskset %s dex2oat'
++                         ' --dex-file=data/local/tmp/dex2oat_input/test%d.apk'
++                         ' --oat-file=data/local/tmp/dex2oat_input/test%d.oat'
++                         % (os.getenv('TEST_MODE'), i+1, i+1))
++              # Find and record real time of the run
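++              # e.g. a time token like '1m23.45s' is parsed below as
++              #   1 * 60 + 23.45 = 83.45 seconds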
++              time_str = ''
++              for t in result.stdout.split() + result.stderr.split():
++                if 'm' in t and 's' in t:
++                  time_str = t.split('m')
++                  break
++              time_sec = float(time_str[0]) * 60
++              time_sec += float(time_str[1].split('s')[0])
++              f.write('User Time: %.2f seconds\n' % time_sec)
++              total_time += time_sec
++
++            f.write('Total elapsed time: %.2f seconds.\n\n' % total_time)
+diff --git a/server/site_tests/android_Dex2oat/bench_config.py b/server/site_tests/android_Dex2oat/bench_config.py
+new file mode 100644
+index 000000000..d2855f22c
+--- /dev/null
++++ b/server/site_tests/android_Dex2oat/bench_config.py
+@@ -0,0 +1,15 @@
++#!/usr/bin/env python2
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++                         default=os.path.join(home,
++                                 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++                            default=os.path.join(android_home,
++                                                 'benchtoolchain'))
++
++out_dir = os.path.join(android_home, 'out')
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Dex2oat/control b/server/site_tests/android_Dex2oat/control
+new file mode 100644
+index 000000000..763864f3a
+--- /dev/null
++++ b/server/site_tests/android_Dex2oat/control
+@@ -0,0 +1,21 @@
++# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
++# Use of this source code is governed by a BSD-style license that can be
++# found in the LICENSE file.
++
++NAME = "Dex2oat"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "SHORT"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "kernel"
++TEST_TYPE = "server"
++
++DOC = """
++
++"""
++
++def run_dex2oat(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_Dex2oat", host=host)
++
++parallel_simple(run_dex2oat, machines)
+diff --git a/server/site_tests/android_Hwui/android_Hwui.py b/server/site_tests/android_Hwui/android_Hwui.py
+new file mode 100644
+index 000000000..d1837e042
+--- /dev/null
++++ b/server/site_tests/android_Hwui/android_Hwui.py
+@@ -0,0 +1,67 @@
++# Tests for android Hwui
++from __future__ import print_function
++
++import bench_config
++import logging
++import os
++import re
++
++from autotest_lib.server import test
++
++class android_Hwui(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        out_dir = os.path.join(bench_config.android_home,
++                              'out/target/product/' + bench_config.product)
++
++        lib_dir = os.path.join(out_dir, 'system/lib/libhwui.so')
++        lib_dir_DUT = '/system/lib/libhwui.so'
++        lib64_dir = os.path.join(out_dir, 'system/lib64/libhwui.so')
++        lib64_dir_DUT = '/system/lib64/libhwui.so'
++        bench_dir = os.path.join(out_dir,
++                                 'symbols/data/nativetest64/',
++                                 'hwuimicro/hwuimicro')
++        bench_dir_DUT = '/data/local/tmp/hwuimicro'
++
++        # Push binary to the device
++        print('Pushing Hwui benchmark onto device!')
++        host.send_file(bench_dir, bench_dir_DUT, delete_dest=True)
++        host.send_file(lib_dir, lib_dir_DUT, delete_dest=True)
++        host.send_file(lib64_dir, lib64_dir_DUT, delete_dest=True)
++
++        # Make sure the binary is executable
++        self.client.run('chmod u+x ' + bench_dir_DUT)
++
++
++        print('Running tests on the device...')
++        self.client.run('taskset %s /data/local/tmp/hwuimicro'
++                        ' > /data/local/tmp/bench_result'
++                        % os.getenv('TEST_MODE'))
++
++        # Pull result from the device
++        out_dir = bench_config.bench_suite_dir
++        result_dir_DUT = '/data/local/tmp/bench_result'
++
++        host.get_file(result_dir_DUT, out_dir, delete_dest=True)
++
++        # Update total time of the test
++        t = 0
++        with open(os.path.join(out_dir, 'bench_result'), 'r') as fin:
++
++          for lines in fin:
++            line = lines.split()
++            print(line)
++
++            # Check if there is test result in this line
++            if len(line) == 8:
++              # Accumulate the Run time for the testcase
++              t += int(line[2])
++
++        # Append total time to the file
++        with open(os.path.join(out_dir, 'bench_result'), 'a') as fout:
++          fout.write('\nTotal elapsed time: %d ns.\n' % t)
++
++        print('Result has been pulled back to file bench_result!')
+diff --git a/server/site_tests/android_Hwui/bench_config.py b/server/site_tests/android_Hwui/bench_config.py
+new file mode 100644
+index 000000000..a98d259f9
+--- /dev/null
++++ b/server/site_tests/android_Hwui/bench_config.py
+@@ -0,0 +1,19 @@
++#!/usr/bin/env python2
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++                         default=os.path.join(home,
++                                 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++                            default=os.path.join(android_home,
++                                                 'benchtoolchain'))
++
++hwui_dir = 'frameworks/base/libs/hwui/'
++
++real_hwui_dir = os.path.join(android_home, hwui_dir)
++
++out_dir = os.path.join(android_home, 'out')
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Hwui/control b/server/site_tests/android_Hwui/control
+new file mode 100644
+index 000000000..89c47da20
+--- /dev/null
++++ b/server/site_tests/android_Hwui/control
+@@ -0,0 +1,19 @@
++#Control
++
++NAME = "Hwui"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "library"
++TEST_TYPE = "server"
++
++DOC = """
++
++"""
++
++def run_hwui_test(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_Hwui", host=host)
++
++parallel_simple(run_hwui_test, machines)
+diff --git a/server/site_tests/android_Panorama/android_Panorama.py b/server/site_tests/android_Panorama/android_Panorama.py
+new file mode 100644
+index 000000000..db2a29cde
+--- /dev/null
++++ b/server/site_tests/android_Panorama/android_Panorama.py
+@@ -0,0 +1,53 @@
++# Tests for android Panorama
++from __future__ import print_function
++
++import bench_config
++import logging
++import os
++import re
++
++from autotest_lib.server import test
++
++class android_Panorama(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        out_dir = os.path.join(bench_config.android_home,
++                              'out/target/product/' + bench_config.product)
++
++        # Set binary directories
++        bench_dir = os.path.join(out_dir,
++                                 'data/local/tmp/panorama_bench64')
++        bench_dir_DUT = '/data/local/tmp/panorama_bench64'
++
++        # Set tests directories
++        tests_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
++                                                 'panorama_input')
++        tests_dir_DUT = '/data/local/tmp/panorama_input/'
++
++        # Push binary to the device
++        print('Pushing binaries of Panorama benchmark onto device!')
++        host.send_file(bench_dir, bench_dir_DUT, delete_dest=True)
++
++        # Make sure the binary is executable
++        self.client.run('chmod u+x ' + bench_dir_DUT)
++
++        # Push testcases to the device
++        print('Pushing tests onto device!')
++        host.send_file(tests_dir, tests_dir_DUT, delete_dest=True)
++
++        print('Running tests on the device...')
++        self.client.run('taskset %s /data/local/tmp/panorama_bench64 '
++                        '/data/local/tmp/panorama_input/panorama_input/test '
++                        '/data/local/tmp/panorama.ppm'
++                        ' > /data/local/tmp/bench_result'
++                        % os.getenv('TEST_MODE'))
++
++        # Pull result from the device
++        out_dir = bench_config.bench_suite_dir
++        result_dir_DUT = '/data/local/tmp/bench_result'
++
++        host.get_file(result_dir_DUT, out_dir, delete_dest=True)
++        print('Result has been pulled back to file bench_result!')
+diff --git a/server/site_tests/android_Panorama/bench_config.py b/server/site_tests/android_Panorama/bench_config.py
+new file mode 100644
+index 000000000..075beec76
+--- /dev/null
++++ b/server/site_tests/android_Panorama/bench_config.py
+@@ -0,0 +1,19 @@
++#!/usr/bin/env python2
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++                         default=os.path.join(home,
++                                 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++                            default=os.path.join(android_home,
++                                                 'benchtoolchain'))
++
++panorama_dir = 'perftests/panorama/'
++
++real_panorama_dir = os.path.join(android_home, panorama_dir)
++
++out_dir = os.path.join(android_home, 'out')
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Panorama/control b/server/site_tests/android_Panorama/control
+new file mode 100644
+index 000000000..3cd589eed
+--- /dev/null
++++ b/server/site_tests/android_Panorama/control
+@@ -0,0 +1,19 @@
++#Control
++
++NAME = "Panorama"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "application"
++TEST_TYPE = "server"
++
++DOC = """
++
++"""
++
++def run_panorama_test(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_Panorama", host=host)
++
++parallel_simple(run_panorama_test, machines)
+diff --git a/server/site_tests/android_SetDevice/android_SetDevice.py b/server/site_tests/android_SetDevice/android_SetDevice.py
+new file mode 100644
+index 000000000..7a7134d58
+--- /dev/null
++++ b/server/site_tests/android_SetDevice/android_SetDevice.py
+@@ -0,0 +1,77 @@
++# Set device modes such as cpu frequency
++from __future__ import print_function
++
++import logging
++import os
++import re
++import time
++
++from autotest_lib.server import test
++
++def _get_cat_value(result):
++    return result.stdout.split('\n')[0]
++
++class android_SetDevice(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        # Disable GPU
++        self.client.run('setprop debug.rs.default-GPU-driver 1')
++
++        # Freeze system
++        # Stop perfd, mpdecision and thermal-engine to ensure setting runs
++        # without unexpected errors.
++        self.client.run('stop thermal-engine')
++        self.client.run('stop mpdecision')
++        self.client.run('stop perfd')
++
++        # Set airplane mode on the device
++        self.client.run('settings put global airplane_mode_on 1')
++
++        print('Setting frequency on the device...')
++        frequency = os.getenv('FREQUENCY')
++
++        # Get number of cores on device
++        result = self.client.run('ls /sys/devices/system/cpu/ '
++                                 '| grep cpu[0-9].*')
++        cores = result.stdout.splitlines()
++        for core in cores:
++          if core.startswith('cpu'):
++            # First set all cores online
++            online = os.path.join('/sys/devices/system/cpu', core, 'online')
++            online_status = _get_cat_value(self.client.run('cat %s' % online))
++            if online_status == '0':
++              self.client.run('echo %s > %s' % ('1', online))
++
++            freq_path = os.path.join('/sys/devices/system/cpu', core,
++                                     'cpufreq')
++
++            # Check if the frequency user entered is legal or not.
++            available_freq = self.client.run('cat %s/'
++                                             'scaling_available_frequencies'
++                                             % (freq_path))
++            available_freq_list = _get_cat_value(available_freq).split()
++
++            if frequency not in available_freq_list:
++              raise ValueError('Wrong frequency input, '
++                               'please select from: \n%s'
++                               % (' '.join(available_freq_list)))
++
++            # Set frequency
++            self.client.run('echo %s > %s/scaling_min_freq'
++                            % (frequency, freq_path))
++            self.client.run('echo %s > %s/scaling_max_freq'
++                            % (frequency, freq_path))
++
++            # Sleep for 2 seconds, let device update the frequency.
++            time.sleep(2)
++
++            # Get current frequency
++            freq = self.client.run('cat %s/cpuinfo_cur_freq' % freq_path)
++            f = _get_cat_value(freq)
++            if f != frequency:
++              raise RuntimeError('Expected frequency for %s to be %s, '
++                                 'but is %s' % (core, frequency, f))
++            print('CPU frequency has been set to %s' % (frequency))
+diff --git a/server/site_tests/android_SetDevice/control b/server/site_tests/android_SetDevice/control
+new file mode 100644
+index 000000000..85163706d
+--- /dev/null
++++ b/server/site_tests/android_SetDevice/control
+@@ -0,0 +1,19 @@
++# Control
++
++NAME = "SetDevice"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "application"
++TEST_TYPE = "server"
++
++DOC = """
++Set the core frequency and which core online for devices.
++"""
++
++def run_set_device_test(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_SetDevice", host=host)
++
++parallel_simple(run_set_device_test, machines)
+diff --git a/server/site_tests/android_Skia/android_Skia.py b/server/site_tests/android_Skia/android_Skia.py
+new file mode 100644
+index 000000000..fc8d09dab
+--- /dev/null
++++ b/server/site_tests/android_Skia/android_Skia.py
+@@ -0,0 +1,65 @@
++# Tests for android Skia
++from __future__ import print_function
++
++import bench_config
++import logging
++import os
++import re
++
++from autotest_lib.server import test
++
++class android_Skia(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        out_dir = os.path.join(bench_config.android_home,
++                              'out/target/product/' + bench_config.product)
++
++        # Set binary directories
++        lib_dir = os.path.join(out_dir, 'system/lib/libskia.so')
++        lib_dir_DUT = '/system/lib/libskia.so'
++        lib64_dir = os.path.join(out_dir, 'system/lib64/libskia.so')
++        lib64_dir_DUT = '/system/lib64/libskia.so'
++        bench_dir = os.path.join(out_dir,
++                                 'data/nativetest64/',
++                                 'skia_nanobench/skia_nanobench')
++        bench_dir_DUT = '/data/local/tmp/skia_nanobench'
++
++        # Push binary to the device
++        print('Pushing Skia benchmark onto device!')
++        host.send_file(bench_dir, bench_dir_DUT, delete_dest=True)
++        host.send_file(lib_dir, lib_dir_DUT, delete_dest=True)
++        host.send_file(lib64_dir, lib64_dir_DUT, delete_dest=True)
++
++        # Make sure the binary is executable
++        self.client.run('chmod u+x ' + bench_dir_DUT)
++
++        # Set resource directory
++        resource_dir = os.path.join(bench_config.real_skia_dir, 'resources')
++        resource_dir_DUT = '/data/local/tmp/skia_resources/'
++
++        # Push binary to the device
++        print('Pushing Skia resources onto device!')
++        host.send_file(resource_dir, resource_dir_DUT, delete_dest=True)
++
++        # Run tests
++        print('Running tests on the device...')
++        try:
++          self.client.run('taskset %s ./data/local/tmp/skia_nanobench'
++                          ' --outResultsFile /data/local/tmp/bench_result'
++                          ' --samples 25'
++                          ' --config nonrendering'
++                          % os.getenv('TEST_MODE'))
++        except:
++          # Ignore failures caused by the benchmark aborting.
++          pass
++
++        # Pull result from the device
++        out_dir = bench_config.bench_suite_dir
++        result_dir_DUT = '/data/local/tmp/bench_result'
++
++        host.get_file(result_dir_DUT, out_dir, delete_dest=True)
++
++        print('Result has been pulled back to file bench_result!')
+diff --git a/server/site_tests/android_Skia/bench_config.py b/server/site_tests/android_Skia/bench_config.py
+new file mode 100644
+index 000000000..5d38d452f
+--- /dev/null
++++ b/server/site_tests/android_Skia/bench_config.py
+@@ -0,0 +1,19 @@
++#!/usr/bin/env python2
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++                         default=os.path.join(home,
++                                 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++                            default=os.path.join(android_home,
++                                                 'benchtoolchain'))
++
++skia_dir = 'external/skia'
++
++real_skia_dir = os.path.join(android_home, skia_dir)
++
++out_dir = os.path.join(android_home, 'out')
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Skia/control b/server/site_tests/android_Skia/control
+new file mode 100644
+index 000000000..e38195a8c
+--- /dev/null
++++ b/server/site_tests/android_Skia/control
+@@ -0,0 +1,19 @@
++#Control
++
++NAME = "Skia"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "library"
++TEST_TYPE = "server"
++
++DOC = """
++
++"""
++
++def run_skia_test(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_Skia", host=host)
++
++parallel_simple(run_skia_test, machines)
+diff --git a/server/site_tests/android_Synthmark/android_Synthmark.py b/server/site_tests/android_Synthmark/android_Synthmark.py
+new file mode 100644
+index 000000000..b317bd0f3
+--- /dev/null
++++ b/server/site_tests/android_Synthmark/android_Synthmark.py
+@@ -0,0 +1,48 @@
++# Tests for android Synthmark
++from __future__ import print_function
++
++import bench_config
++import logging
++import os
++import re
++
++from autotest_lib.server import test
++
++class android_Synthmark(test.test):
++    version = 1
++
++    def run_once(self, host=None):
++        self.client = host
++
++        out_dir = os.path.join(bench_config.android_home,
++                              'out/target/product/' + bench_config.product)
++
++        # Set binary directories
++        bench_dir = os.path.join(out_dir,
++                                 'symbols/system/bin/synthmark')
++        bench_dir_DUT = '/data/local/tmp/synthmark'
++
++        # Push binary to the device
++        print('Pushing binaries of Synthmark benchmark onto device!')
++        host.send_file(bench_dir, bench_dir_DUT, delete_dest=True)
++
++        # Make sure the binary is executable
++        self.client.run('chmod u+x ' + bench_dir_DUT)
++
++        print('Running tests on the device...')
++        # First run creates bench_result
++        self.client.run('taskset %s /data/local/tmp/synthmark'
++                        ' > /data/local/tmp/bench_result'
++                        % os.getenv('TEST_MODE'))
++        # Next 4 runs add to bench_result
++        for i in xrange(4):
++          self.client.run('taskset %s /data/local/tmp/synthmark'
++                          ' >> /data/local/tmp/bench_result'
++                          % os.getenv('TEST_MODE'))
++
++        # Pull result from the device
++        out_dir = bench_config.bench_suite_dir
++        result_dir_DUT = '/data/local/tmp/bench_result'
++
++        host.get_file(result_dir_DUT, out_dir, delete_dest=True)
++        print('Result has been pulled back to file bench_result!')
+diff --git a/server/site_tests/android_Synthmark/bench_config.py b/server/site_tests/android_Synthmark/bench_config.py
+new file mode 100644
+index 000000000..7d7aacacd
+--- /dev/null
++++ b/server/site_tests/android_Synthmark/bench_config.py
+@@ -0,0 +1,19 @@
++#!/usr/bin/env python2
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++                         default=os.path.join(home,
++                                 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++                            default=os.path.join(android_home,
++                                                 'benchtoolchain'))
++
++synthmark_dir = 'synthmark'
++
++real_synthmark_dir = os.path.join(android_home, synthmark_dir)
++
++out_dir = os.path.join(android_home, 'out')
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Synthmark/control b/server/site_tests/android_Synthmark/control
+new file mode 100644
+index 000000000..144766351
+--- /dev/null
++++ b/server/site_tests/android_Synthmark/control
+@@ -0,0 +1,19 @@
++#Control
++
++NAME = "Synthmark"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "application"
++TEST_TYPE = "server"
++
++DOC = """
++
++"""
++
++def run_synthmark_test(machine):
++    host = hosts.create_host(machine)
++    job.run_test("android_Synthmark", host=host)
++
++parallel_simple(run_synthmark_test, machines)
+diff --git a/site_utils/set_device.py b/site_utils/set_device.py
+new file mode 100755
+index 000000000..abb8a8dcc
+--- /dev/null
++++ b/site_utils/set_device.py
+@@ -0,0 +1,110 @@
++#!/usr/bin/python
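++# Example usage (the device serial is hypothetical):
++#   site_utils/set_device.py -s <device-serial> -q 960000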
++from __future__ import print_function
++
++import argparse
++import common
++import logging
++import os
++import sys
++
++# Turn the logging level to INFO before importing other autotest code, to avoid
++# having failed import logging messages confuse the test_droid user.
++logging.basicConfig(level=logging.INFO)
++
++# Unfortunately, autotest depends on external packages for assorted
++# functionality regardless of whether or not it is needed in a particular
++# context.  Since we can't depend on people to import these utilities in any
++# principled way, we dynamically download code before any autotest imports.
++try:
++    import chromite.lib.terminal  # pylint: disable=unused-import
++    import django.http  # pylint: disable=unused-import
++except ImportError:
++    # Ensure the chromite site-package is installed.
++    import subprocess
++    build_externals_path = os.path.join(
++            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
++            'utils', 'build_externals.py')
++    subprocess.check_call([build_externals_path, '--names_to_check',
++                           'chromiterepo', 'django'])
++    # Restart the script so python now finds the autotest site-packages.
++    sys.exit(os.execv(__file__, sys.argv))
++
++from autotest_lib.client.common_lib import utils
++from autotest_lib.server.hosts import adb_host
++from autotest_lib.site_utils import test_runner_utils
++from autotest_lib.site_utils import tester_feedback
++
++def _parse_arguments_internal(argv):
++    """
++    Parse command line arguments
++
++    @param argv: argument list to parse
++
++    @returns:    tuple of parsed arguments and argv suitable for remote runs
++
++    @raises SystemExit if arguments are malformed, or required arguments
++            are not present.
++    """
++
++    parser = argparse.ArgumentParser(description='Set device cpu cores and '
++                                                 'frequency.')
++
++    parser.add_argument('-s', '--serials', metavar='SERIALS',
++                        help='Comma-separated list of device serials under '
++                             'test.')
++    parser.add_argument('-r', '--remote', metavar='REMOTE',
++                        default='localhost',
++                        help='hostname[:port] if the ADB device is connected '
++                             'to a remote machine. Ensure this workstation '
++                             'is configured for passwordless ssh access as '
++                             'users "root" or "adb"')
++    parser.add_argument('-q', '--frequency', type=int, default=960000,
++                        help='Specify the CPU frequency of the device, lower '
++                             'frequency will slow down the performance but '
++                             'reduce noise.')
++
++    return parser.parse_args(argv)
++
++def main(argv):
++    """
++    Entry point for set_device script.
++
++    @param argv: arguments list
++    """
++    arguments = _parse_arguments_internal(argv)
++
++    serials = arguments.serials
++    if serials is None:
++        result = utils.run(['adb', 'devices'])
++        devices = adb_host.ADBHost.parse_device_serials(result.stdout)
++        if len(devices) != 1:
++            logging.error('Could not detect exactly one device; please select '
++                          'one with -s: %s', devices)
++            return 1
++        serials = devices[0]
++
++    autotest_path = os.path.dirname(os.path.dirname(
++            os.path.realpath(__file__)))
++    site_utils_path = os.path.join(autotest_path, 'site_utils')
++    realpath = os.path.realpath(__file__)
++    site_utils_path = os.path.realpath(site_utils_path)
++    host_attributes = {'serials': serials,
++                       'os_type': 'android'}
++    results_directory = test_runner_utils.create_results_directory(None)
++
++    logging.info('Start setting CPU frequency on the device...')
++
++    os.environ['FREQUENCY'] = str(arguments.frequency)
++
++    set_device = ['SetDevice']
++    if test_runner_utils.perform_run_from_autotest_root(
++                      autotest_path, argv, set_device, arguments.remote,
++                      host_attributes=host_attributes,
++                      results_directory=results_directory):
++      logging.error('Error while setting device!')
++      return 1
++
++    return 0
++
++if __name__ == '__main__':
++    sys.exit(main(sys.argv[1:]))
+diff --git a/site_utils/test_bench.py b/site_utils/test_bench.py
+new file mode 100755
+index 000000000..4d0773ad9
+--- /dev/null
++++ b/site_utils/test_bench.py
+@@ -0,0 +1,133 @@
++#!/usr/bin/python
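++# Example usage (the device serial is hypothetical):
++#   site_utils/test_bench.py -b Panorama -m little -s <device-serial>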
++from __future__ import print_function
++
++import argparse
++import common
++import logging
++import os
++import sys
++
++# Turn the logging level to INFO before importing other autotest
++# code, to avoid having failed import logging messages confuse the
++# test_droid user.
++logging.basicConfig(level=logging.INFO)
++
++# Unfortunately, autotest depends on external packages for assorted
++# functionality regardless of whether or not it is needed in a particular
++# context.
++# Since we can't depend on people to import these utilities in any principled
++# way, we dynamically download code before any autotest imports.
++try:
++    import chromite.lib.terminal  # pylint: disable=unused-import
++    import django.http  # pylint: disable=unused-import
++except ImportError:
++    # Ensure the chromite site-package is installed.
++    import subprocess
++    build_externals_path = os.path.join(
++            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
++            'utils', 'build_externals.py')
++    subprocess.check_call([build_externals_path, '--names_to_check',
++                           'chromiterepo', 'django'])
++    # Restart the script so python now finds the autotest site-packages.
++    sys.exit(os.execv(__file__, sys.argv))
++
++from autotest_lib.client.common_lib import utils
++from autotest_lib.server.hosts import adb_host
++from autotest_lib.site_utils import test_runner_utils
++from autotest_lib.site_utils import tester_feedback
++
++def _parse_arguments_internal(argv):
++    """
++    Parse command line arguments
++
++    @param argv: argument list to parse
++
++    @returns:    tuple of parsed arguments and argv suitable for remote runs
++
++    @raises SystemExit if arguments are malformed, or required arguments
++            are not present.
++    """
++
++    parser = argparse.ArgumentParser(description='Run remote tests.')
++
++    parser.add_argument('-b', '--bench', metavar='BENCH', required=True,
++                        help='Select the benchmark want to be run for '
++                             'test.')
++    parser.add_argument('-s', '--serials', metavar='SERIALS',
++                        help='Comma-separated list of device serials under '
++                             'test.')
++    parser.add_argument('-r', '--remote', metavar='REMOTE',
++                        default='localhost',
++                        help='hostname[:port] if the ADB device is connected '
++                             'to a remote machine. Ensure this workstation '
++                             'is configured for passwordless ssh access as '
++                             'users "root" or "adb"')
++    parser.add_argument('-m', '--mode', default='little',
++                        help='Two modes can be chosen, little mode runs on a '
++                             'single core of Cortex-A53, while big mode runs '
++                             'on a single core of Cortex-A57.')
++
++    return parser.parse_args(argv)
++
++def main(argv):
++    """
++    Entry point for test_bench script.
++
++    @param argv: arguments list
++    """
++    arguments = _parse_arguments_internal(argv)
++
++    serials = arguments.serials
++    if serials is None:
++        result = utils.run(['adb', 'devices'])
++        devices = adb_host.ADBHost.parse_device_serials(result.stdout)
++        if len(devices) != 1:
++            logging.error('Could not detect exactly one device; please select '
++                          'one with -s: %s', devices)
++            return 1
++        serials = devices[0]
++
++    autotest_path = os.path.dirname(os.path.dirname(
++            os.path.realpath(__file__)))
++    site_utils_path = os.path.join(autotest_path, 'site_utils')
++    realpath = os.path.realpath(__file__)
++    site_utils_path = os.path.realpath(site_utils_path)
++    host_attributes = {'serials': serials,
++                       'os_type': 'android'}
++    results_directory = test_runner_utils.create_results_directory(None)
++
++    bench = arguments.bench
++
++    benchlist = ['Panorama', 'Skia', 'Dex2oat', 'Hwui', 'Synthmark', 'Binder']
++
++    logging.info('Start testing benchmark on the device...')
++
++    if bench not in benchlist:
++        logging.error('Please select one benchmark from the list below: \n%s',
++                      '\n'.join(benchlist))
++        return 1
++
++    # Use the taskset command to run benchmarks with different CPU core
++    # settings.
++    #
++    # The TEST_MODE variable is set to either 7 or 56 and is used as the
++    # core mask for taskset.
++    #
++    # The Nexus 6P has 8 cores and the 5X has 6 cores. CPUs 0-3 on both
++    # devices are Cortex-A53 cores, which are slow. CPUs 4-5 on the 5X and
++    # 4-7 on the 6P are Cortex-A57 cores, which are fast.
++    #
++    # So we use 7 (binary 00000111) for little mode, which runs the benchmark
++    # on three slow cores, and 56 (binary 00111000) for big mode, which runs
++    # it on two fast cores and one slow core.
++    os.environ['TEST_MODE'] = '7' if arguments.mode == 'little' else '56'
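++    # Illustration (not part of the test flow): the benchmark wrappers are
++    # assumed to consume TEST_MODE roughly as `taskset <mask> <binary>`, so
++    # the two modes pin the benchmark as follows:
++    #   TEST_MODE=7  -> CPUs 0-2 (three A53 cores)
++    #   TEST_MODE=56 -> CPUs 3-5 (one A53 core + two A57 cores)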
++
++    tests = [bench]
++
++    if test_runner_utils.perform_run_from_autotest_root(
++            autotest_path, argv, tests, arguments.remote,
++            host_attributes=host_attributes,
++            results_directory=results_directory):
++        logging.error('Error while testing on device.')
++        return 1
++
++if __name__ == '__main__':
++    sys.exit(main(sys.argv[1:]))
diff --git a/android_bench_suite/build_bench.py b/android_bench_suite/build_bench.py
new file mode 100755
index 0000000..44ad7a0
--- /dev/null
+++ b/android_bench_suite/build_bench.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# pylint: disable=cros-logging-import
+
+"""Script to build the benchmark locally with toolchain settings."""
+from __future__ import print_function
+
+import argparse
+import config
+import logging
+import os
+import subprocess
+import sys
+
+# Turn the logging level to INFO before importing other code, to avoid having
+# failed import logging messages confuse the user.
+logging.basicConfig(level=logging.INFO)
+
+
+def _parse_arguments_internal(argv):
+  parser = argparse.ArgumentParser(description='Build benchmarks with '
+                                   'specified toolchain settings')
+
+  parser.add_argument(
+      '-b', '--bench', required=True, help='Select the benchmark to be built.')
+
+  parser.add_argument(
+      '-c',
+      '--compiler_dir',
+      metavar='DIR',
+      help='Specify the path to the compiler bin '
+      'directory.')
+
+  parser.add_argument(
+      '-o', '--build_os',
+      help='Specify the host OS used to build the benchmark.')
+
+  parser.add_argument(
+      '-l',
+      '--llvm_prebuilts_version',
+      help='Specify the version of prebuilt LLVM.')
+
+  parser.add_argument(
+      '-f',
+      '--cflags',
+      help='Specify the optimization cflags for '
+      'the toolchain.')
+
+  parser.add_argument(
+      '--ldflags', help='Specify linker flags for the toolchain.')
+
+  return parser.parse_args(argv)
+
+
+# Set flags for compiling benchmarks by changing the local
+# CFLAGS/LDFLAGS in the Android makefile of each benchmark.
+def set_flags(bench, cflags, ldflags):
+  if not cflags:
+    logging.info('No CFLAGS specified, using default settings.')
+    cflags = ''
+  else:
+    logging.info('Setting CFLAGS to "%s"...', cflags)
+
+  if not ldflags:
+    logging.info('No LDFLAGS specified, using default settings.')
+    ldflags = ''
+  else:
+    logging.info('Setting LDFLAGS to "%s"...', ldflags)
+
+  add_flags = config.bench_flags_dict[bench]
+  add_flags(cflags, ldflags)
+  logging.info('Flags set successfully!')
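+  # Note: each add_flags_* helper (in set_flags.py, not shown here) is assumed
+  # to back up the benchmark's Android.mk/Android.bp to tmp_makefile (restored
+  # later by restore_makefile below) and to inject the user flags, e.g. via
+  # CFLAGS_FOR_BENCH_SUITE / LDFLAGS_FOR_BENCH_SUITE, which the patched
+  # makefiles append to LOCAL_CFLAGS / LOCAL_LDFLAGS.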
+
+
+def set_build_os(build_os):
+  # Set $BUILD_OS variable for android makefile
+  if build_os:
+    os.environ['BUILD_OS'] = build_os
+    logging.info('BUILD_OS set to "%s"...', build_os)
+  else:
+    logging.info('No BUILD_OS specified, using linux as default...')
+
+
+def set_llvm_prebuilts_version(llvm_prebuilts_version):
+  # Set $LLVM_PREBUILTS_VERSION for android makefile
+  if llvm_prebuilts_version:
+    os.environ['LLVM_PREBUILTS_VERSION'] = llvm_prebuilts_version
+    logging.info('LLVM_PREBUILTS_VERSION set to "%s"...',
+                 llvm_prebuilts_version)
+  else:
+    logging.info('No LLVM_PREBUILTS_VERSION specified, using default one...')
+
+
+def set_compiler(compiler):
+  # If compiler_dir has been specified, copy the binaries to a temporary
+  # prebuilt location and point LLVM_PREBUILTS_VERSION at it.
+  if compiler:
+    # Report an error if the path does not exist
+    if not os.path.isdir(compiler):
+      logging.error('Error while setting compiler: '
+                    'Directory %s does not exist!', compiler)
+      raise OSError('Directory %s does not exist.' % compiler)
+
+    # Specify temporary directory for compiler
+    tmp_dir = os.path.join(config.android_home,
+                           'prebuilts/clang/host/linux-x86', 'clang-tmp')
+
+    compiler_content = os.path.join(compiler, '.')
+
+    # Copy compiler to new directory
+    try:
+      subprocess.check_call(['cp', '-rf', compiler_content, tmp_dir])
+    except subprocess.CalledProcessError:
+      logging.error('Error while copying the compiler to '
+                    'temporary directory %s!', tmp_dir)
+      raise
+
+    # Set environment variable
+    os.environ['LLVM_PREBUILTS_VERSION'] = 'clang-tmp'
+
+    logging.info('Prebuilt Compiler set as %s.', os.path.abspath(compiler))
+
+
+def set_compiler_env(bench, compiler, build_os, llvm_prebuilts_version, cflags,
+                     ldflags):
+  logging.info('Setting compiler options for benchmark...')
+
+  # If no specific prebuilt compiler directory, use BUILD_OS and
+  # LLVM_PREBUILTS_VERSION to set the compiler version.
+  # Otherwise, use the new prebuilt compiler.
+  if not compiler:
+    set_build_os(build_os)
+    set_llvm_prebuilts_version(llvm_prebuilts_version)
+  else:
+    set_compiler(compiler)
+
+  set_flags(bench, cflags, ldflags)
+
+  return 0
+
+
+def remove_tmp_dir():
+  tmp_dir = os.path.join(config.android_home, 'prebuilts/clang/host/linux-x86',
+                         'clang-tmp')
+
+  try:
+    subprocess.check_call(['rm', '-r', tmp_dir])
+  except subprocess.CalledProcessError:
+    logging.error('Error while removing the temporary '
+                  'compiler directory %s!', tmp_dir)
+    raise
+
+
+# Recover the makefile/blueprint from our patch after building
+def restore_makefile(bench):
+  pwd = os.path.join(config.android_home, config.bench_dict[bench])
+  mk_file = os.path.join(pwd, 'Android.mk')
+  if not os.path.exists(mk_file):
+    mk_file = os.path.join(pwd, 'Android.bp')
+  subprocess.check_call(['mv', os.path.join(pwd, 'tmp_makefile'), mk_file])
+
+
+# Run script to build benchmark
+def build_bench(bench, source_dir):
+  logging.info('Start building benchmark...')
+
+  raw_cmd = ('cd {android_home} '
+             '&& source build/envsetup.sh '
+             '&& lunch {product_combo} '
+             '&& mmma {source_dir} -j48'.format(
+                 android_home=config.android_home,
+                 product_combo=config.product_combo,
+                 source_dir=source_dir))
+
+  log_file = os.path.join(config.bench_suite_dir, 'build_log')
+  with open(log_file, 'a') as logfile:
+    log_head = 'Log for building benchmark: %s\n' % (bench)
+    logfile.write(log_head)
+    try:
+      subprocess.check_call(
+          ['bash', '-c', raw_cmd], stdout=logfile, stderr=logfile)
+    except subprocess.CalledProcessError:
+      logging.error('Error while running %s, please check '
+                    '%s for more info.', raw_cmd, log_file)
+      restore_makefile(bench)
+      raise
+
+  logging.info('Logs for building benchmark %s are written to %s.', bench,
+               log_file)
+  logging.info('Benchmark built successfully!')
+
+
+def main(argv):
+  arguments = _parse_arguments_internal(argv)
+
+  bench = arguments.bench
+  compiler = arguments.compiler_dir
+  build_os = arguments.build_os
+  llvm_version = arguments.llvm_prebuilts_version
+  cflags = arguments.cflags
+  ldflags = arguments.ldflags
+
+  try:
+    source_dir = config.bench_dict[bench]
+  except KeyError:
+    logging.error('Please select one benchmark from the list below:\n\t' +
+                  '\n\t'.join(config.bench_list))
+    raise
+
+  set_compiler_env(bench, compiler, build_os, llvm_version, cflags, ldflags)
+
+  build_bench(bench, source_dir)
+
+  # If flags have been set, restore the makefile/blueprint to the
+  # original one.
+  restore_makefile(bench)
+
+  # If a tmp directory is used for compiler path, remove it after building.
+  if compiler:
+    remove_tmp_dir()
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
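+
+# Example invocations (hypothetical paths and version strings):
+#   ./build_bench.py -b Panorama -f '-O3' --ldflags '-Wl,-O1'
+#   ./build_bench.py -b Hwui -c /path/to/clang/bin
+#   ./build_bench.py -b Skia -o linux-x86 -l clang-4053586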
diff --git a/android_bench_suite/config.py b/android_bench_suite/config.py
new file mode 100644
index 0000000..4cfd261
--- /dev/null
+++ b/android_bench_suite/config.py
@@ -0,0 +1,94 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Configuration file for the benchmark suite."""
+from __future__ import print_function
+
+import ConfigParser
+import os
+
+from parse_result import parse_Panorama
+from parse_result import parse_Dex2oat
+from parse_result import parse_Hwui
+from parse_result import parse_Skia
+from parse_result import parse_Synthmark
+from parse_result import parse_Binder
+
+from set_flags import add_flags_Panorama
+from set_flags import add_flags_Dex2oat
+from set_flags import add_flags_Hwui
+from set_flags import add_flags_Skia
+from set_flags import add_flags_Synthmark
+from set_flags import add_flags_Binder
+
+home = os.environ['HOME']
+
+# Load user configurations for default environments
+env_config = ConfigParser.ConfigParser(allow_no_value=True)
+env_config.read('env_setting')
+
+def get_suite_env(name, path=False):
+  variable = env_config.get('Suite_Environment', name)
+  if variable:
+    if path and not os.path.isdir(variable):
+      raise ValueError('The path of %s does not exist.' % name)
+    return variable
+  else:
+    raise ValueError('Please specify %s in env_setting' % name)
+
+# Android source code type: internal or aosp
+android_type = get_suite_env('android_type')
+
+# Android home directory specified as android_home.
+android_home = get_suite_env('android_home', True)
+
+# The benchmark results will be saved in bench_suite_dir.
+# Please create a directory to store the results; the default directory is
+# android_home/benchtoolchain.
+bench_suite_dir = get_suite_env('bench_suite_dir', True)
+
+# The crosperf directory is used to generate the crosperf report.
+toolchain_utils = get_suite_env('toolchain_utils', True)
+
+# Please change both product and architecture at the same time.
+# The product can be chosen from the lunch list when building Android.
+product_combo = get_suite_env('product_combo')
+
+# Arch can be found from out/target/product
+product = get_suite_env('product')
+
+# The benchmark list is kept in the following variables; you can extend it by
+# adding new benchmarks (see the note after bench_list below).
+bench_dict = {
+    'Panorama': 'packages/apps/LegacyCamera/jni/',
+    'Dex2oat': 'art/compiler/',
+    'Hwui': 'frameworks/base/libs/hwui/',
+    'Skia': 'external/skia/',
+    'Synthmark': 'synthmark/',
+    'Binder': 'frameworks/native/libs/binder/',
+}
+
+bench_parser_dict = {
+    'Panorama': parse_Panorama,
+    'Dex2oat': parse_Dex2oat,
+    'Hwui': parse_Hwui,
+    'Skia': parse_Skia,
+    'Synthmark': parse_Synthmark,
+    'Binder': parse_Binder,
+}
+
+bench_flags_dict = {
+    'Panorama': add_flags_Panorama,
+    'Dex2oat': add_flags_Dex2oat,
+    'Hwui': add_flags_Hwui,
+    'Skia': add_flags_Skia,
+    'Synthmark': add_flags_Synthmark,
+    'Binder': add_flags_Binder,
+}
+
+bench_list = bench_dict.keys()
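+
+# To add a new benchmark (sketch, assuming the matching helpers are written):
+#   1. add its source path to bench_dict,
+#   2. implement parse_<Bench> in parse_result.py and register it in
+#      bench_parser_dict,
+#   3. implement add_flags_<Bench> in set_flags.py and register it in
+#      bench_flags_dict.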
+
+# Directories used in the benchmark suite
+autotest_dir = 'external/autotest/'
+out_dir = os.path.join(android_home, 'out')
diff --git a/android_bench_suite/dex2oat_input/test1.apk b/android_bench_suite/dex2oat_input/test1.apk
new file mode 100644
index 0000000..16cc638
--- /dev/null
+++ b/android_bench_suite/dex2oat_input/test1.apk
Binary files differ
diff --git a/android_bench_suite/dex2oat_input/test2.apk b/android_bench_suite/dex2oat_input/test2.apk
new file mode 100644
index 0000000..4dc0aa0
--- /dev/null
+++ b/android_bench_suite/dex2oat_input/test2.apk
Binary files differ
diff --git a/android_bench_suite/discard_patches.py b/android_bench_suite/discard_patches.py
new file mode 100755
index 0000000..7a4b0b5
--- /dev/null
+++ b/android_bench_suite/discard_patches.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Script to discard all the patches added to Android for this suite"""
+
+from __future__ import print_function
+
+import config
+import os
+import subprocess
+
+
+def discard_git(path):
+  try:
+    subprocess.check_call(['git', '-C', path, 'reset'])
+    subprocess.check_call(['git', '-C', path, 'clean', '-fdx'])
+    subprocess.check_call(['git', '-C', path, 'stash'])
+    print('Patch in %s removed successfully!' % path)
+  except subprocess.CalledProcessError:
+    print('Error while removing patch in %s' % path)
+
+
+def dispatch_skia():
+  skia_dir = os.path.join(config.android_home, config.bench_dict['Skia'])
+  discard_git(skia_dir)
+
+
+def dispatch_autotest():
+  autotest_dir = os.path.join(config.android_home, config.autotest_dir)
+  discard_git(autotest_dir)
+
+
+def dispatch_panorama():
+  panorama_dir = os.path.join(config.android_home,
+                              config.bench_dict['Panorama'])
+  discard_git(panorama_dir)
+
+
+def dispatch_synthmark():
+  synthmark_dir = 'synthmark'
+  try:
+    subprocess.check_call(
+        ['rm', '-rf',
+         os.path.join(config.android_home, synthmark_dir)])
+    print('Synthmark patch removed successfully!')
+  except subprocess.CalledProcessError:
+    print('Error occurred; the Synthmark patch was not removed.')
+
+
+def main():
+  dispatch_skia()
+  dispatch_autotest()
+  dispatch_panorama()
+  dispatch_synthmark()
+
+
+if __name__ == '__main__':
+  main()
diff --git a/android_bench_suite/env_setting b/android_bench_suite/env_setting
new file mode 100644
index 0000000..397888b
--- /dev/null
+++ b/android_bench_suite/env_setting
@@ -0,0 +1,31 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[Suite_Environment]
+
+# Please indicate the android source type: internal or aosp
+
+android_type = aosp
+
+# This is basically the root of your android sources.
+
+android_home = /work/android/aosp
+
+# This is the directory where you want to put the logs and results.
+# Please create the directory before specifying it here.
+
+bench_suite_dir = /work/android/bench_suite_dir
+
+# Crosperf is a toolchain utility that helps generate an HTML report of the
+# test. Please specify the path to it here.
+
+toolchain_utils = /chromium_source/src/third_party/toolchain-utils/crosperf
+
+# Product_combo and product describe the type of image you want to build.
+# The product_combo list can be found with the 'lunch' command.
+# The product is usually the middle part of the combo name and can also be
+# found in $ANDROID_HOME/out/target/product/
+
+product_combo = aosp_bullhead-userdebug
+product = bullhead
diff --git a/android_bench_suite/fix_json.py b/android_bench_suite/fix_json.py
new file mode 100755
index 0000000..cf94dd6
--- /dev/null
+++ b/android_bench_suite/fix_json.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# pylint: disable=cros-logging-import
+
+"""Script to re-format json result to one with branch_name and build_id"""
+from __future__ import print_function
+
+import argparse
+import config
+import json
+import logging
+import os
+import subprocess
+import sys
+
+# Turn the logging level to INFO before importing other autotest
+# code, to avoid having failed import logging messages confuse the
+# test_droid user.
+logging.basicConfig(level=logging.INFO)
+
+
+def _parse_arguments_internal(argv):
+  parser = argparse.ArgumentParser(description='Convert result to JSON '
+                                   'format')
+  parser.add_argument(
+      '-b', '--bench',
+      help='The benchmark to generate the JSON format file for.')
+  return parser.parse_args(argv)
+
+def fix_json(bench):
+  # Set environment variable for crosperf
+  os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)
+
+  logging.info('Generating Crosperf Report...')
+  json_path = os.path.join(config.bench_suite_dir, bench + '_refined')
+  crosperf_cmd = [
+      os.path.join(config.toolchain_utils, 'generate_report.py'), '--json',
+      '-i=' + os.path.join(config.bench_suite_dir, bench + '.json'),
+      '-o=' + json_path, '-f'
+  ]
+
+  # Run crosperf generate_report.py
+  logging.info('Command: %s', crosperf_cmd)
+  subprocess.call(crosperf_cmd)
+
+  json_path += '.json'
+  with open(json_path) as fin:
+    objs = json.load(fin)
+  for obj in objs:
+    obj['branch_name'] = 'aosp/master'
+    obj['build_id'] = 0
+  with open(json_path, 'w') as fout:
+    json.dump(objs, fout)
+
+  logging.info('JSON file fixed successfully!')
+
+def main(argv):
+  arguments = _parse_arguments_internal(argv)
+
+  bench = arguments.bench
+
+  fix_json(bench)
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/android_bench_suite/fix_skia_results.py b/android_bench_suite/fix_skia_results.py
new file mode 100755
index 0000000..6eec6cc
--- /dev/null
+++ b/android_bench_suite/fix_skia_results.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# pylint: disable=cros-logging-import
+"""Transforms skia benchmark results to ones that crosperf can understand."""
+
+from __future__ import print_function
+
+import itertools
+import logging
+import json
+import sys
+
+# Turn the logging level to INFO before importing other autotest
+# code, to avoid having failed import logging messages confuse the
+# test_droid user.
+logging.basicConfig(level=logging.INFO)
+
+# All of the results we care about, by name.
+# Each of these *must* end in _ns, _us, _ms, or _s, since all the metrics we
+# collect (so far) are related to time, and we alter the results based on the
+# suffix of these strings (so we don't have 0.000421ms per sample, for example)
+_RESULT_RENAMES = {
+    'memset32_100000_640_480_nonrendering': 'memset_time_ms',
+    'path_equality_50%_640_480_nonrendering': 'path_equality_ns',
+    'sort_qsort_backward_640_480_nonrendering': 'qsort_us'
+}
+
+
+def _GetFamiliarName(name):
+  r = _RESULT_RENAMES.get(name)
+  return r if r else name
+
+
+def _IsResultInteresting(name):
+  return name in _RESULT_RENAMES
+
+
+def _GetTimeMultiplier(label_name):
+  """Given a time (in milliseconds), normalize it to what label_name expects.
+
+  "What label_name expects" meaning "we pattern match against the last few
+  non-space chars in label_name."
+
+  This expects the time unit to be separated from anything else by '_'.
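+
+  Examples (values follow from the suffix table in the function body):
+
+  >>> _GetTimeMultiplier('qsort_us')
+  1000.0
+  >>> _GetTimeMultiplier('memset_time_ms')
+  1.0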
+  """
+  ms_mul = 1000 * 1000.
+  endings = [('_ns', 1), ('_us', 1000), ('_ms', ms_mul), ('_s', ms_mul * 1000)]
+  for end, mul in endings:
+    if label_name.endswith(end):
+      return ms_mul / mul
+  raise ValueError('Unknown ending in "%s"; expecting one of %s' %
+                   (label_name, [end for end, _ in endings]))
+
+
+def _GetTimeDenom(ms):
+  """Given a list of times (in milliseconds), find a sane time unit for them.
+
+  Returns the unit name, and `ms` normalized to that time unit.
+
+  >>> _GetTimeDenom([1, 2, 3])
+  ('ms', [1.0, 2.0, 3.0])
+  >>> _GetTimeDenom([.1, .2, .3])
+  ('us', [100.0, 200.0, 300.0])
+  """
+
+  ms_mul = 1000 * 1000
+  units = [('us', 1000), ('ms', ms_mul), ('s', ms_mul * 1000)]
+  for name, mul in reversed(units):
+    normalized = [float(t) * ms_mul / mul for t in ms]
+    average = sum(normalized) / len(normalized)
+    if all(n > 0.1 for n in normalized) and average >= 1:
+      return name, normalized
+
+  normalized = [float(t) * ms_mul for t in ms]
+  return 'ns', normalized
+
+
+def _TransformBenchmarks(raw_benchmarks):
+  # We get {"results": {"bench_name": Results}}
+  # where
+  #   Results = {"config_name": {"samples": [float], etc.}}
+  #
+  # We want {"data": {"skia": [[BenchmarkData]]},
+  #          "platforms": ["platform1, ..."]}
+  # where
+  #   BenchmarkData = {"bench_name": bench_samples[N], ..., "retval": 0}
+  #
+  # Note that retval is awkward -- crosperf's JSON reporter reports the result
+  # as a failure if it's not there. Everything else treats it like a
+  # statistic...
+  benchmarks = raw_benchmarks['results']
+  results = []
+  for bench_name, bench_result in benchmarks.iteritems():
+    try:
+      for cfg_name, keyvals in bench_result.iteritems():
+        # Some benchmarks won't have timing data (either it won't exist at all,
+        # or it'll be empty); skip them.
+        samples = keyvals.get('samples')
+        if not samples:
+          continue
+
+        bench_name = '%s_%s' % (bench_name, cfg_name)
+        if not _IsResultInteresting(bench_name):
+          continue
+
+        friendly_name = _GetFamiliarName(bench_name)
+        if len(results) < len(samples):
+          results.extend({
+              'retval': 0
+          } for _ in xrange(len(samples) - len(results)))
+
+        time_mul = _GetTimeMultiplier(friendly_name)
+        for sample, app in itertools.izip(samples, results):
+          assert friendly_name not in app
+          app[friendly_name] = sample * time_mul
+    except (KeyError, ValueError) as e:
+      logging.error('While converting "%s" (key: %s): %s',
+                    bench_result, bench_name, e.message)
+      raise
+
+  # Realistically, [results] should be multiple results, where each entry in the
+  # list is the result for a different label. Because we only deal with one
+  # label at the moment, we need to wrap it in its own list.
+  return results
+
+
+if __name__ == '__main__':
+
+  def _GetUserFile(argv):
+    if not argv or argv[0] == '-':
+      return sys.stdin
+    return open(argv[0])
+
+  def _Main():
+    with _GetUserFile(sys.argv[1:]) as in_file:
+      obj = json.load(in_file)
+    output = _TransformBenchmarks(obj)
+    json.dump(output, sys.stdout)
+
+  _Main()
diff --git a/android_bench_suite/gen_json.py b/android_bench_suite/gen_json.py
new file mode 100755
index 0000000..ad617ff
--- /dev/null
+++ b/android_bench_suite/gen_json.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# pylint: disable=cros-logging-import
+
+"""Script to help generate json format report from raw data."""
+from __future__ import print_function
+
+import argparse
+import config
+import json
+import logging
+import sys
+
+# Turn the logging level to INFO before importing other autotest
+# code, to avoid having failed import logging messages confuse the
+# test_droid user.
+logging.basicConfig(level=logging.INFO)
+
+
+def _parse_arguments_internal(argv):
+  parser = argparse.ArgumentParser(description='Convert result to JSON '
+                                   'format')
+
+  parser.add_argument(
+      '-b', '--bench',
+      help='The benchmark to generate the JSON format file for.')
+
+  parser.add_argument(
+      '-i', '--input', help='Specify the input result file name.')
+
+  parser.add_argument(
+      '-o', '--output', help='Specify the output JSON format result file')
+
+  parser.add_argument(
+      '-p',
+      '--platform',
+      help='Indicate the platform (experiment or device) name '
+      'to be shown in the JSON')
+
+  parser.add_argument(
+      '--iterations',
+      type=int,
+      help='Number of iterations the result includes.')
+  return parser.parse_args(argv)
+
+# Collect data from a benchmark result file and convert it to JSON-ready entries
+def collect_data(infile, bench, it):
+  result_dict = {}
+  with open(infile + str(it)) as fin:
+    if bench not in config.bench_parser_dict:
+      logging.error('Please input the correct benchmark name.')
+      raise ValueError('Wrong benchmark name: %s' % bench)
+    parse = config.bench_parser_dict[bench]
+    result_dict = parse(bench, fin)
+  return result_dict
+
+# If there is no original output file, create a new one and initialize it.
+def create_outfile(outfile, bench):
+  with open(outfile, 'w') as fout:
+    obj_null = {'data': {bench.lower(): []}, 'platforms': []}
+    json.dump(obj_null, fout)
+
+# Open the original output file, creating and initializing it first if needed.
+def get_outfile(outfile, bench):
+  try:
+    return open(outfile)
+  except IOError:
+    create_outfile(outfile, bench)
+    return open(outfile)
+
+def main(argv):
+  arguments = _parse_arguments_internal(argv)
+
+  bench = arguments.bench
+  infile = arguments.input
+  outfile = arguments.output
+  platform = arguments.platform
+  iteration = arguments.iterations
+
+  result = []
+  for i in xrange(iteration):
+    result += collect_data(infile, bench, i)
+
+  with get_outfile(outfile, bench) as fout:
+    obj = json.load(fout)
+  obj['platforms'].append(platform)
+  obj['data'][bench.lower()].append(result)
+  with open(outfile, 'w') as fout:
+    json.dump(obj, fout)
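+
+  # The resulting file then has the shape (illustrative):
+  #   {"platforms": [<platform>, ...],
+  #    "data": {<bench_lowercase>: [[{metric: value, ...}, ...], ...]}}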
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/android_bench_suite/panorama.diff b/android_bench_suite/panorama.diff
new file mode 100644
index 0000000..fcd214e
--- /dev/null
+++ b/android_bench_suite/panorama.diff
@@ -0,0 +1,763 @@
+diff --git a/jni/Android.mk b/jni/Android.mk
+index 8b816270..d48e0d8e 100755
+--- a/jni/Android.mk
++++ b/jni/Android.mk
+@@ -1,50 +1,53 @@
++local_target_dir := $(TARGET_OUT_DATA)/local/tmp
++
+ LOCAL_PATH:= $(call my-dir)
+ 
+ include $(CLEAR_VARS)
+ 
+ LOCAL_C_INCLUDES := \
+-        $(LOCAL_PATH)/feature_stab/db_vlvm \
+-        $(LOCAL_PATH)/feature_stab/src \
+-        $(LOCAL_PATH)/feature_stab/src/dbreg \
+-        $(LOCAL_PATH)/feature_mos/src \
+-        $(LOCAL_PATH)/feature_mos/src/mosaic
+-
+-LOCAL_CFLAGS := -O3 -DNDEBUG
+-LOCAL_CPPFLAGS := -std=gnu++98
+-
+-LOCAL_SRC_FILES := \
+-        feature_mos_jni.cpp \
+-        mosaic_renderer_jni.cpp \
+-        feature_mos/src/mosaic/trsMatrix.cpp \
+-        feature_mos/src/mosaic/AlignFeatures.cpp \
+-        feature_mos/src/mosaic/Blend.cpp \
+-        feature_mos/src/mosaic/Delaunay.cpp \
+-        feature_mos/src/mosaic/ImageUtils.cpp \
+-        feature_mos/src/mosaic/Mosaic.cpp \
+-        feature_mos/src/mosaic/Pyramid.cpp \
+-        feature_mos/src/mosaic_renderer/Renderer.cpp \
+-        feature_mos/src/mosaic_renderer/WarpRenderer.cpp \
+-        feature_mos/src/mosaic_renderer/SurfaceTextureRenderer.cpp \
+-        feature_mos/src/mosaic_renderer/YVURenderer.cpp \
+-        feature_mos/src/mosaic_renderer/FrameBuffer.cpp \
+-        feature_stab/db_vlvm/db_feature_detection.cpp \
+-        feature_stab/db_vlvm/db_feature_matching.cpp \
+-        feature_stab/db_vlvm/db_framestitching.cpp \
+-        feature_stab/db_vlvm/db_image_homography.cpp \
+-        feature_stab/db_vlvm/db_rob_image_homography.cpp \
+-        feature_stab/db_vlvm/db_utilities.cpp \
+-        feature_stab/db_vlvm/db_utilities_camera.cpp \
+-        feature_stab/db_vlvm/db_utilities_indexing.cpp \
+-        feature_stab/db_vlvm/db_utilities_linalg.cpp \
+-        feature_stab/db_vlvm/db_utilities_poly.cpp \
+-        feature_stab/src/dbreg/dbreg.cpp \
+-        feature_stab/src/dbreg/dbstabsmooth.cpp \
+-        feature_stab/src/dbreg/vp_motionmodel.c
++    $(LOCAL_PATH)/feature_mos/src \
++    $(LOCAL_PATH)/feature_stab/src \
++    $(LOCAL_PATH)/feature_stab/db_vlvm
+ 
+-LOCAL_SHARED_LIBRARIES := liblog libnativehelper libGLESv2
+-#LOCAL_LDLIBS := -L$(SYSROOT)/usr/lib -ldl -llog -lGLESv2 -L$(TARGET_OUT)
++LOCAL_SRC_FILES := benchmark.cpp \
++	feature_mos/src/mosaic/ImageUtils.cpp \
++    feature_mos/src/mosaic/Mosaic.cpp \
++    feature_mos/src/mosaic/AlignFeatures.cpp \
++    feature_mos/src/mosaic/Blend.cpp \
++    feature_mos/src/mosaic/Pyramid.cpp \
++    feature_mos/src/mosaic/trsMatrix.cpp \
++    feature_mos/src/mosaic/Delaunay.cpp \
++    feature_mos/src/mosaic_renderer/Renderer.cpp \
++    feature_mos/src/mosaic_renderer/WarpRenderer.cpp \
++    feature_mos/src/mosaic_renderer/SurfaceTextureRenderer.cpp \
++    feature_mos/src/mosaic_renderer/YVURenderer.cpp \
++    feature_mos/src/mosaic_renderer/FrameBuffer.cpp \
++    feature_stab/db_vlvm/db_rob_image_homography.cpp \
++    feature_stab/db_vlvm/db_feature_detection.cpp \
++    feature_stab/db_vlvm/db_image_homography.cpp \
++    feature_stab/db_vlvm/db_framestitching.cpp \
++    feature_stab/db_vlvm/db_feature_matching.cpp \
++    feature_stab/db_vlvm/db_utilities.cpp \
++    feature_stab/db_vlvm/db_utilities_camera.cpp \
++    feature_stab/db_vlvm/db_utilities_indexing.cpp \
++    feature_stab/db_vlvm/db_utilities_linalg.cpp \
++    feature_stab/db_vlvm/db_utilities_poly.cpp \
++    feature_stab/src/dbreg/dbstabsmooth.cpp \
++    feature_stab/src/dbreg/dbreg.cpp \
++    feature_stab/src/dbreg/vp_motionmodel.c
+ 
+-LOCAL_MODULE_TAGS := optional
++LOCAL_CFLAGS := -O3 -DNDEBUG -Wno-unused-parameter -Wno-maybe-uninitialized
++LOCAL_CFLAGS += $(CFLAGS_FOR_BENCH_SUITE)
++LOCAL_LDFLAGS := $(LDFLAGS_FOR_BENCH_SUITE)
++LOCAL_CPPFLAGS := -std=c++98
++LOCAL_MODULE_TAGS := tests
++LOCAL_MODULE := panorama_bench
++LOCAL_MODULE_STEM_32 := panorama_bench
++LOCAL_MODULE_STEM_64 := panorama_bench64
++LOCAL_MULTILIB := both
++LOCAL_MODULE_PATH := $(local_target_dir)
++LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
++LOCAL_FORCE_STATIC_EXECUTABLE := true
++LOCAL_STATIC_LIBRARIES := libc libm
+ 
+-LOCAL_MODULE    := libjni_legacymosaic
+-include $(BUILD_SHARED_LIBRARY)
++include $(BUILD_EXECUTABLE)
+diff --git a/jni/benchmark.cpp b/jni/benchmark.cpp
+new file mode 100755
+index 00000000..2a6440f4
+--- /dev/null
++++ b/jni/benchmark.cpp
+@@ -0,0 +1,131 @@
++/*
++ * Copyright (C) 2012 The Android Open Source Project
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *      http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++#include <time.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <unistd.h>
++
++#include "mosaic/Mosaic.h"
++#include "mosaic/ImageUtils.h"
++
++#define MAX_FRAMES 200
++#define KERNEL_ITERATIONS 10
++
++const int blendingType = Blend::BLEND_TYPE_HORZ;
++const int stripType = Blend::STRIP_TYPE_WIDE;
++
++ImageType yvuFrames[MAX_FRAMES];
++
++int loadImages(const char* basename, int &width, int &height)
++{
++    char filename[512];
++    struct stat filestat;
++    int i;
++
++    for (i = 0; i < MAX_FRAMES; i++) {
++        sprintf(filename, "%s_%03d.ppm", basename, i + 1);
++        if (stat(filename, &filestat) != 0) break;
++        ImageType rgbFrame = ImageUtils::readBinaryPPM(filename, width, height);
++        yvuFrames[i] = ImageUtils::allocateImage(width, height,
++                                ImageUtils::IMAGE_TYPE_NUM_CHANNELS);
++        ImageUtils::rgb2yvu(yvuFrames[i], rgbFrame, width, height);
++        ImageUtils::freeImage(rgbFrame);
++    }
++    return i;
++}
++
++int main(int argc, char **argv)
++{
++    struct timespec t1, t2, t3;
++
++    int width, height;
++    float totalElapsedTime = 0;
++
++    const char *basename;
++    const char *filename;
++
++    if (argc != 3) {
++        printf("Usage: %s input_dir output_filename\n", argv[0]);
++        return 0;
++    } else {
++        basename = argv[1];
++        filename = argv[2];
++    }
++
++    // Load the images outside the computational kernel
++    int totalFrames = loadImages(basename, width, height);
++
++    if (totalFrames == 0) {
++        printf("Image files not found. Make sure %s exists.\n",
++               basename);
++        return 1;
++    }
++
++    printf("%d frames loaded\n", totalFrames);
++
++
++    // Interesting stuff is here
++    for (int iteration = 0; iteration < KERNEL_ITERATIONS; iteration++)  {
++        Mosaic mosaic;
++
++        mosaic.initialize(blendingType, stripType, width, height, -1, false, 0);
++
++        clock_gettime(CLOCK_MONOTONIC, &t1);
++        for (int i = 0; i < totalFrames; i++) {
++            mosaic.addFrame(yvuFrames[i]);
++        }
++        clock_gettime(CLOCK_MONOTONIC, &t2);
++
++        float progress = 0.0;
++        bool cancelComputation = false;
++
++        mosaic.createMosaic(progress, cancelComputation);
++
++        int mosaicWidth, mosaicHeight;
++        ImageType resultYVU = mosaic.getMosaic(mosaicWidth, mosaicHeight);
++
++        ImageType imageRGB = ImageUtils::allocateImage(
++            mosaicWidth, mosaicHeight, ImageUtils::IMAGE_TYPE_NUM_CHANNELS);
++
++        clock_gettime(CLOCK_MONOTONIC, &t3);
++
++        float elapsedTime =
++            (t3.tv_sec - t1.tv_sec) + (t3.tv_nsec - t1.tv_nsec)/1e9;
++        float addImageTime =
++            (t2.tv_sec - t1.tv_sec) + (t2.tv_nsec - t1.tv_nsec)/1e9;
++        float stitchImageTime =
++            (t3.tv_sec - t2.tv_sec) + (t3.tv_nsec - t2.tv_nsec)/1e9;
++
++        totalElapsedTime += elapsedTime;
++
++        printf("Iteration %d: %dx%d moasic created: "
++               "%.2f seconds (%.2f + %.2f)\n",
++               iteration, mosaicWidth, mosaicHeight,
++               elapsedTime, addImageTime, stitchImageTime);
++
++        // Write the output only once for correctness check
++        if (iteration == 0) {
++            ImageUtils::yvu2rgb(imageRGB, resultYVU, mosaicWidth,
++                                mosaicHeight);
++            ImageUtils::writeBinaryPPM(imageRGB, filename, mosaicWidth,
++                                       mosaicHeight);
++        }
++    }
++    printf("Total elapsed time: %.2f seconds\n", totalElapsedTime);
++
++    return 0;
++}
+diff --git a/jni/feature_mos/src/mosaic/AlignFeatures.cpp b/jni/feature_mos/src/mosaic/AlignFeatures.cpp
+index aeabf8f9..703a5ea5 100644
+--- a/jni/feature_mos/src/mosaic/AlignFeatures.cpp
++++ b/jni/feature_mos/src/mosaic/AlignFeatures.cpp
+@@ -30,6 +30,8 @@
+ 
+ #define LOG_TAG "AlignFeatures"
+ 
++const double Align::DEFAULT_MAX_DISPARITY = 0.1;
++
+ Align::Align()
+ {
+   width = height = 0;
+diff --git a/jni/feature_mos/src/mosaic/AlignFeatures.h b/jni/feature_mos/src/mosaic/AlignFeatures.h
+index 19f39051..9999f575 100644
+--- a/jni/feature_mos/src/mosaic/AlignFeatures.h
++++ b/jni/feature_mos/src/mosaic/AlignFeatures.h
+@@ -44,7 +44,7 @@ public:
+   ///// Settings for feature-based alignment
+   // Number of features to use from corner detection
+   static const int DEFAULT_NR_CORNERS=750;
+-  static const double DEFAULT_MAX_DISPARITY=0.1;//0.4;
++  static const double DEFAULT_MAX_DISPARITY;//0.4;
+   // Type of homography to model
+   static const int DEFAULT_MOTION_MODEL=DB_HOMOGRAPHY_TYPE_R_T;
+ // static const int DEFAULT_MOTION_MODEL=DB_HOMOGRAPHY_TYPE_PROJECTIVE;
+diff --git a/jni/feature_mos/src/mosaic/Blend.cpp b/jni/feature_mos/src/mosaic/Blend.cpp
+index e37755de..b6a843a2 100644
+--- a/jni/feature_mos/src/mosaic/Blend.cpp
++++ b/jni/feature_mos/src/mosaic/Blend.cpp
+@@ -26,8 +26,8 @@
+ #include "Geometry.h"
+ #include "trsMatrix.h"
+ 
+-#include "Log.h"
+-#define LOG_TAG "BLEND"
++const float Blend::LIMIT_SIZE_MULTIPLIER = 50.f * 2.0f;
++const float Blend::LIMIT_HEIGHT_MULTIPLIER = 2.5f;
+ 
+ Blend::Blend()
+ {
+@@ -67,7 +67,6 @@ int Blend::initialize(int blendingType, int stripType, int frame_width, int fram
+ 
+     if (!m_pFrameYPyr || !m_pFrameUPyr || !m_pFrameVPyr)
+     {
+-        LOGE("Error: Could not allocate pyramids for blending");
+         return BLEND_RET_ERROR_MEMORY;
+     }
+ 
+@@ -122,7 +121,6 @@ int Blend::runBlend(MosaicFrame **oframes, MosaicFrame **rframes,
+ 
+     if (numCenters == 0)
+     {
+-        LOGE("Error: No frames to blend");
+         return BLEND_RET_ERROR;
+     }
+ 
+@@ -228,9 +226,6 @@ int Blend::runBlend(MosaicFrame **oframes, MosaicFrame **rframes,
+ 
+     if (xRightMost <= xLeftMost || yBottomMost <= yTopMost)
+     {
+-        LOGE("RunBlend: aborting -consistency check failed,"
+-             "(xLeftMost, xRightMost, yTopMost, yBottomMost): (%d, %d, %d, %d)",
+-             xLeftMost, xRightMost, yTopMost, yBottomMost);
+         return BLEND_RET_ERROR;
+     }
+ 
+@@ -241,17 +236,12 @@ int Blend::runBlend(MosaicFrame **oframes, MosaicFrame **rframes,
+     ret = MosaicSizeCheck(LIMIT_SIZE_MULTIPLIER, LIMIT_HEIGHT_MULTIPLIER);
+     if (ret != BLEND_RET_OK)
+     {
+-       LOGE("RunBlend: aborting - mosaic size check failed, "
+-            "(frame_width, frame_height) vs (mosaic_width, mosaic_height): "
+-            "(%d, %d) vs (%d, %d)", width, height, Mwidth, Mheight);
+        return ret;
+     }
+ 
+-    LOGI("Allocate mosaic image for blending - size: %d x %d", Mwidth, Mheight);
+     YUVinfo *imgMos = YUVinfo::allocateImage(Mwidth, Mheight);
+     if (imgMos == NULL)
+     {
+-        LOGE("RunBlend: aborting - couldn't alloc %d x %d mosaic image", Mwidth, Mheight);
+         return BLEND_RET_ERROR_MEMORY;
+     }
+ 
+@@ -362,7 +352,6 @@ int Blend::FillFramePyramid(MosaicFrame *mb)
+             !PyramidShort::BorderReduce(m_pFrameUPyr, m_wb.nlevsC) || !PyramidShort::BorderExpand(m_pFrameUPyr, m_wb.nlevsC, -1) ||
+             !PyramidShort::BorderReduce(m_pFrameVPyr, m_wb.nlevsC) || !PyramidShort::BorderExpand(m_pFrameVPyr, m_wb.nlevsC, -1))
+     {
+-        LOGE("Error: Could not generate Laplacian pyramids");
+         return BLEND_RET_ERROR;
+     }
+     else
+@@ -384,7 +373,6 @@ int Blend::DoMergeAndBlend(MosaicFrame **frames, int nsite,
+     m_pMosaicVPyr = PyramidShort::allocatePyramidPacked(m_wb.nlevsC,(unsigned short)rect.Width(),(unsigned short)rect.Height(),BORDER);
+     if (!m_pMosaicYPyr || !m_pMosaicUPyr || !m_pMosaicVPyr)
+     {
+-      LOGE("Error: Could not allocate pyramids for blending");
+       return BLEND_RET_ERROR_MEMORY;
+     }
+ 
+@@ -579,6 +567,11 @@ int Blend::DoMergeAndBlend(MosaicFrame **frames, int nsite,
+     // Blend
+     PerformFinalBlending(imgMos, cropping_rect);
+ 
++    if (cropping_rect.Width() <= 0 || cropping_rect.Height() <= 0)
++    {
++        return BLEND_RET_ERROR;
++    }
++
+     if (m_pMosaicVPyr) free(m_pMosaicVPyr);
+     if (m_pMosaicUPyr) free(m_pMosaicUPyr);
+     if (m_pMosaicYPyr) free(m_pMosaicYPyr);
+@@ -632,7 +625,6 @@ int Blend::PerformFinalBlending(YUVinfo &imgMos, MosaicRect &cropping_rect)
+     if (!PyramidShort::BorderExpand(m_pMosaicYPyr, m_wb.nlevs, 1) || !PyramidShort::BorderExpand(m_pMosaicUPyr, m_wb.nlevsC, 1) ||
+         !PyramidShort::BorderExpand(m_pMosaicVPyr, m_wb.nlevsC, 1))
+     {
+-      LOGE("Error: Could not BorderExpand!");
+       return BLEND_RET_ERROR;
+     }
+ 
+@@ -785,18 +777,31 @@ int Blend::PerformFinalBlending(YUVinfo &imgMos, MosaicRect &cropping_rect)
+                 break;
+             }
+         }
++
+     }
+ 
++    RoundingCroppingSizeToMultipleOf8(cropping_rect);
++
+     for(int j=0; j<imgMos.Y.height; j++)
+     {
+         delete b[j];
+     }
+ 
+-    delete b;
++    delete[] b;
+ 
+     return BLEND_RET_OK;
+ }
+ 
++void Blend::RoundingCroppingSizeToMultipleOf8(MosaicRect &rect) {
++    int height = rect.bottom - rect.top + 1;
++    int residue = height & 7;
++    rect.bottom -= residue;
++
++    int width = rect.right - rect.left + 1;
++    residue = width & 7;
++    rect.right -= residue;
++}
++
+ void Blend::ComputeMask(CSite *csite, BlendRect &vcrect, BlendRect &brect, MosaicRect &rect, YUVinfo &imgMos, int site_idx)
+ {
+     PyramidShort *dptr = m_pMosaicYPyr;
+diff --git a/jni/feature_mos/src/mosaic/Blend.h b/jni/feature_mos/src/mosaic/Blend.h
+index 6371fdeb..175eacd4 100644
+--- a/jni/feature_mos/src/mosaic/Blend.h
++++ b/jni/feature_mos/src/mosaic/Blend.h
+@@ -119,9 +119,10 @@ protected:
+   void CropFinalMosaic(YUVinfo &imgMos, MosaicRect &cropping_rect);
+ 
+ private:
+-   static const float LIMIT_SIZE_MULTIPLIER = 5.0f * 2.0f;
+-   static const float LIMIT_HEIGHT_MULTIPLIER = 2.5f;
++   static const float LIMIT_SIZE_MULTIPLIER;
++   static const float LIMIT_HEIGHT_MULTIPLIER;
+    int MosaicSizeCheck(float sizeMultiplier, float heightMultiplier);
++   void RoundingCroppingSizeToMultipleOf8(MosaicRect& rect);
+ };
+ 
+ #endif
+diff --git a/jni/feature_mos/src/mosaic/Delaunay.cpp b/jni/feature_mos/src/mosaic/Delaunay.cpp
+index 82f5d203..0ce09fc5 100644
+--- a/jni/feature_mos/src/mosaic/Delaunay.cpp
++++ b/jni/feature_mos/src/mosaic/Delaunay.cpp
+@@ -24,7 +24,7 @@
+ 
+ #define QQ 9   // Optimal value as determined by testing
+ #define DM 38  // 2^(1+DM/2) element sort capability. DM=38 for >10^6 elements
+-#define NYL (-1)
++#define NYL -1
+ #define valid(l) ccw(orig(basel), dest(l), dest(basel))
+ 
+ 
+diff --git a/jni/feature_mos/src/mosaic/ImageUtils.cpp b/jni/feature_mos/src/mosaic/ImageUtils.cpp
+index 6d0aac0c..daa86060 100644
+--- a/jni/feature_mos/src/mosaic/ImageUtils.cpp
++++ b/jni/feature_mos/src/mosaic/ImageUtils.cpp
+@@ -283,7 +283,7 @@ ImageType ImageUtils::readBinaryPPM(const char *filename, int &width, int &heigh
+ 
+   FILE *imgin = NULL;
+   int mval=0, format=0, eret;
+-  ImageType ret = IMAGE_TYPE_NOIMAGE;
++  ImageType ret = NULL;//IMAGE_TYPE_NOIMAGE;
+ 
+   imgin = fopen(filename, "r");
+   if (imgin == NULL) {
+diff --git a/jni/feature_mos/src/mosaic/ImageUtils.h b/jni/feature_mos/src/mosaic/ImageUtils.h
+index 87782383..92965ca8 100644
+--- a/jni/feature_mos/src/mosaic/ImageUtils.h
++++ b/jni/feature_mos/src/mosaic/ImageUtils.h
+@@ -47,7 +47,7 @@ public:
+   /**
+    *  Definition of an empty image.
+    */
+-  static const int IMAGE_TYPE_NOIMAGE = NULL;
++  static const int IMAGE_TYPE_NOIMAGE = 0;
+ 
+   /**
+    *  Convert image from BGR (interlaced) to YVU (non-interlaced)
+diff --git a/jni/feature_mos/src/mosaic/Log.h b/jni/feature_mos/src/mosaic/Log.h
+index cf6f14b1..2adfeda9 100644
+--- a/jni/feature_mos/src/mosaic/Log.h
++++ b/jni/feature_mos/src/mosaic/Log.h
+@@ -14,7 +14,7 @@
+  * limitations under the License.
+  */
+ #ifndef LOG_H_
+-#define LOG_H_
++#define LOG_H_
+ 
+ #include <android/log.h>
+ #define LOGV(...) __android_log_print(ANDROID_LOG_SILENT, LOG_TAG, __VA_ARGS__)
+diff --git a/jni/feature_mos/src/mosaic/Mosaic.cpp b/jni/feature_mos/src/mosaic/Mosaic.cpp
+index f17c030b..4abc6f68 100644
+--- a/jni/feature_mos/src/mosaic/Mosaic.cpp
++++ b/jni/feature_mos/src/mosaic/Mosaic.cpp
+@@ -26,9 +26,6 @@
+ #include "Mosaic.h"
+ #include "trsMatrix.h"
+ 
+-#include "Log.h"
+-#define LOG_TAG "MOSAIC"
+-
+ Mosaic::Mosaic()
+ {
+     initialized = false;
+@@ -47,6 +44,10 @@ Mosaic::~Mosaic()
+     delete frames;
+     delete rframes;
+ 
++    for (int j = 0; j < owned_size; j++)
++        delete owned_frames[j];
++    delete owned_frames;
++
+     if (aligner != NULL)
+         delete aligner;
+     if (blender != NULL)
+@@ -88,13 +89,10 @@ int Mosaic::initialize(int blendingType, int stripType, int width, int height, i
+         {
+             frames[i] = NULL;
+         }
+-
+-
+     }
+ 
+-    LOGV("Initialize %d %d", width, height);
+-    LOGV("Frame width %d,%d", width, height);
+-    LOGV("Max num frames %d", max_frames);
++    owned_frames = new ImageType[max_frames];
++    owned_size = 0;
+ 
+     aligner = new Align();
+     aligner->initialize(width, height,quarter_res,thresh_still);
+@@ -107,7 +105,6 @@ int Mosaic::initialize(int blendingType, int stripType, int width, int height, i
+         blender->initialize(blendingType, stripType, width, height);
+     } else {
+         blender = NULL;
+-        LOGE("Error: Unknown blending type %d",blendingType);
+         return MOSAIC_RET_ERROR;
+     }
+ 
+@@ -123,7 +120,15 @@ int Mosaic::addFrameRGB(ImageType imageRGB)
+     imageYVU = ImageUtils::allocateImage(this->width, this->height, ImageUtils::IMAGE_TYPE_NUM_CHANNELS);
+     ImageUtils::rgb2yvu(imageYVU, imageRGB, width, height);
+ 
+-    return addFrame(imageYVU);
++    int existing_frames_size = frames_size;
++    int ret = addFrame(imageYVU);
++
++    if (frames_size > existing_frames_size)
++        owned_frames[owned_size++] = imageYVU;
++    else
++        ImageUtils::freeImage(imageYVU);
++
++    return ret;
+ }
+ 
+ int Mosaic::addFrame(ImageType imageYVU)
+@@ -146,8 +151,6 @@ int Mosaic::addFrame(ImageType imageYVU)
+ 
+         if (frames_size >= max_frames)
+         {
+-            LOGV("WARNING: More frames than preallocated, ignoring."
+-                 "Increase maximum number of frames (-f <max_frames>) to avoid this");
+             return MOSAIC_RET_ERROR;
+         }
+ 
+diff --git a/jni/feature_mos/src/mosaic/Mosaic.h b/jni/feature_mos/src/mosaic/Mosaic.h
+index fc6ecd90..9dea6642 100644
+--- a/jni/feature_mos/src/mosaic/Mosaic.h
++++ b/jni/feature_mos/src/mosaic/Mosaic.h
+@@ -181,6 +181,12 @@ protected:
+   int frames_size;
+   int max_frames;
+ 
++  /**
++    * Implicitly created frames, should be freed by Mosaic.
++    */
++  ImageType *owned_frames;
++  int owned_size;
++
+   /**
+    * Initialization state.
+    */
+diff --git a/jni/feature_mos/src/mosaic/Pyramid.cpp b/jni/feature_mos/src/mosaic/Pyramid.cpp
+index 945eafba..b022d73d 100644
+--- a/jni/feature_mos/src/mosaic/Pyramid.cpp
++++ b/jni/feature_mos/src/mosaic/Pyramid.cpp
+@@ -154,24 +154,30 @@ void PyramidShort::BorderExpandOdd(PyramidShort *in, PyramidShort *out, PyramidS
+     // Vertical Filter
+     for (j = -off; j < in->height + off; j++) {
+         int j2 = j * 2;
+-        for (i = -scr->border; i < scr->width + scr->border; i++) {
++        int limit = scr->width + scr->border;
++        for (i = -scr->border; i < limit; i++) {
++            int t1 = in->ptr[j][i];
++            int t2 = in->ptr[j+1][i];
+             scr->ptr[j2][i] = (short)
+-                ((6 * in->ptr[j][i] + (in->ptr[j-1][i] + in->ptr[j+1][i]) + 4) >> 3);
+-            scr->ptr[j2+1][i] = (short)((in->ptr[j][i] + in->ptr[j+1][i] + 1) >> 1);
++                ((6 * t1 + (in->ptr[j-1][i] + t2) + 4) >> 3);
++            scr->ptr[j2+1][i] = (short)((t1 + t2 + 1) >> 1);
+         }
+     }
+ 
+     BorderSpread(scr, 0, 0, 3, 3);
+ 
+     // Horizontal Filter
+-    for (i = -off; i < scr->width + off; i++) {
+-        int i2 = i * 2;
+-        for (j = -out->border; j < out->height + out->border; j++) {
++    int limit = out->height + out->border;
++    for (j = -out->border; j < limit; j++) {
++        for (i = -off; i < scr->width + off; i++) {
++            int i2 = i * 2;
++            int t1 = scr->ptr[j][i];
++            int t2 = scr->ptr[j][i+1];
+             out->ptr[j][i2] = (short) (out->ptr[j][i2] +
+-                    (mode * ((6 * scr->ptr[j][i] +
+-                              scr->ptr[j][i-1] + scr->ptr[j][i+1] + 4) >> 3)));
++                    (mode * ((6 * t1 +
++                              scr->ptr[j][i-1] + t2 + 4) >> 3)));
+             out->ptr[j][i2+1] = (short) (out->ptr[j][i2+1] +
+-                    (mode * ((scr->ptr[j][i] + scr->ptr[j][i+1] + 1) >> 1)));
++                    (mode * ((t1 + t2 + 1) >> 1)));
+         }
+     }
+ 
+diff --git a/jni/feature_mos/src/mosaic_renderer/FrameBuffer.cpp b/jni/feature_mos/src/mosaic_renderer/FrameBuffer.cpp
+index 9a07e496..a956f23b 100755
+--- a/jni/feature_mos/src/mosaic_renderer/FrameBuffer.cpp
++++ b/jni/feature_mos/src/mosaic_renderer/FrameBuffer.cpp
+@@ -55,6 +55,8 @@ bool FrameBuffer::Init(int width, int height, GLenum format) {
+                            GL_TEXTURE_2D,
+                            mTextureName,
+                            0);
++    checkFramebufferStatus("FrameBuffer.cpp");
++    checkGlError("framebuffertexture2d");
+ 
+     if (!checkGlError("texture setup")) {
+         return false;
+@@ -94,6 +96,3 @@ int FrameBuffer::GetWidth() const {
+ int FrameBuffer::GetHeight() const {
+     return mHeight;
+ }
+-
+-
+-
+diff --git a/jni/feature_mos/src/mosaic_renderer/FrameBuffer.h b/jni/feature_mos/src/mosaic_renderer/FrameBuffer.h
+index b6a20ad1..314b1262 100755
+--- a/jni/feature_mos/src/mosaic_renderer/FrameBuffer.h
++++ b/jni/feature_mos/src/mosaic_renderer/FrameBuffer.h
+@@ -4,7 +4,10 @@
+ #include <GLES2/gl2.h>
+ #include <GLES2/gl2ext.h>
+ 
+-extern bool checkGlError(const char* op);
++#define checkGlError(op)  checkGLErrorDetail(__FILE__, __LINE__, (op))
++
++extern bool checkGLErrorDetail(const char* file, int line, const char* op);
++extern void checkFramebufferStatus(const char* name);
+ 
+ class FrameBuffer {
+   public:
+diff --git a/jni/feature_mos/src/mosaic_renderer/Renderer.cpp b/jni/feature_mos/src/mosaic_renderer/Renderer.cpp
+index c5c143f9..b9938eb6 100755
+--- a/jni/feature_mos/src/mosaic_renderer/Renderer.cpp
++++ b/jni/feature_mos/src/mosaic_renderer/Renderer.cpp
+@@ -87,7 +87,7 @@ GLuint Renderer::createProgram(const char* pVertexSource, const char* pFragmentS
+         GLint linkStatus = GL_FALSE;
+         glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
+ 
+-        LOGI("Program Linked!");
++        LOGI("Program Linked (%d)!", program);
+ 
+         if (linkStatus != GL_TRUE)
+         {
+diff --git a/jni/feature_stab/db_vlvm/db_utilities_constants.h b/jni/feature_stab/db_vlvm/db_utilities_constants.h
+index 612fc783..07565efd 100644
+--- a/jni/feature_stab/db_vlvm/db_utilities_constants.h
++++ b/jni/feature_stab/db_vlvm/db_utilities_constants.h
+@@ -64,7 +64,7 @@
+ #define DB_DEFAULT_ABS_CORNER_THRESHOLD 50000000.0
+ #define DB_DEFAULT_REL_CORNER_THRESHOLD 0.00005
+ #define DB_DEFAULT_MAX_DISPARITY 0.1
+-#define DB_DEFAULT_NO_DISPARITY (-1.0)
++#define DB_DEFAULT_NO_DISPARITY -1.0
+ #define DB_DEFAULT_MAX_TRACK_LENGTH 300
+ 
+ #define DB_DEFAULT_MAX_NR_CAMERAS 1000
+diff --git a/jni/feature_stab/src/dbreg/dbreg.cpp b/jni/feature_stab/src/dbreg/dbreg.cpp
+index dc7d58fe..da06aa2a 100644
+--- a/jni/feature_stab/src/dbreg/dbreg.cpp
++++ b/jni/feature_stab/src/dbreg/dbreg.cpp
+@@ -485,7 +485,8 @@ int db_FrameToReferenceRegistration::AddFrame(const unsigned char * const * im,
+   if(m_do_motion_smoothing)
+     SmoothMotion();
+ 
+-   db_PrintDoubleMatrix(m_H_ref_to_ins,3,3);
++   // Disable debug printing
++   // db_PrintDoubleMatrix(m_H_ref_to_ins,3,3);
+ 
+   db_Copy9(H, m_H_ref_to_ins);
+ 
+diff --git a/jni/feature_stab/src/dbreg/dbstabsmooth.cpp b/jni/feature_stab/src/dbreg/dbstabsmooth.cpp
+index dffff8ab..2bb5d2e5 100644
+--- a/jni/feature_stab/src/dbreg/dbstabsmooth.cpp
++++ b/jni/feature_stab/src/dbreg/dbstabsmooth.cpp
+@@ -136,7 +136,7 @@ bool db_StabilizationSmoother::smoothMotionAdaptive(/*VP_BIMG *bimg,*/int hsize,
+             smoothFactor = minSmoothFactor;
+ 
+         // Find the amount of motion that must be compensated so that no "border" pixels are seen in the stable video
+-        for (smoothFactor = smoothFactor; smoothFactor >= minSmoothFactor; smoothFactor -= 0.01) {
++        for (; smoothFactor >= minSmoothFactor; smoothFactor -= 0.01) {
+             // Compute the smoothed motion
+             if(!smoothMotion(inmot, &tmpMotion, smoothFactor))
+                 break;
+diff --git a/jni/feature_stab/src/dbreg/vp_motionmodel.h b/jni/feature_stab/src/dbreg/vp_motionmodel.h
+index 71a7f7e7..a63ac001 100644
+--- a/jni/feature_stab/src/dbreg/vp_motionmodel.h
++++ b/jni/feature_stab/src/dbreg/vp_motionmodel.h
+@@ -120,7 +120,7 @@ enum VP_MOTION_MODEL {
+   VP_VFE_AFFINE=120
+ };
+ 
+-#define VP_REFID (-1)   /* Default ID used for reference frame */
++#define VP_REFID -1   /* Default ID used for reference frame */
+ 
+ typedef struct {
+   VP_TRS par;            /* Contains the motion paramerers.
+@@ -205,16 +205,16 @@ typedef struct {
+ /* Warp a 2d point (assuming the z component is zero) */
+ #define VP_WARP_POINT_2D(inx,iny,m,outx,outy) do {\
+   VP_PAR vpTmpWarpPnt___= MWX(m)*(inx)+MWY(m)*(iny)+MWW(m); \
+-  (outx) = (MXX(m)*((VP_PAR)(inx))+MXY(m)*((VP_PAR)(iny))+MXW(m))/vpTmpWarpPnt___; \
+-  (outy) = (MYX(m)*((VP_PAR)(inx))+MYY(m)*((VP_PAR)(iny))+MYW(m))/vpTmpWarpPnt___; } while (0)
++  outx = (MXX(m)*((VP_PAR)inx)+MXY(m)*((VP_PAR)iny)+MXW(m))/vpTmpWarpPnt___; \
++  outy = (MYX(m)*((VP_PAR)inx)+MYY(m)*((VP_PAR)iny)+MYW(m))/vpTmpWarpPnt___; } while (0)
+ 
+ /* Warp a 3d point */
+ #define VP_WARP_POINT_3D(inx,iny,inz,m,outx,outy,outz) do {\
+-  VP_PAR vpTmpWarpPnt___= MWX(m)*(inx)+MWY(m)*(iny)+MWZ(m)*((VP_PAR)(inz))+MWW(m); \
+-  (outx) = (MXX(m)*((VP_PAR)(inx))+MXY(m)*((VP_PAR)(iny))+MXZ(m)*((VP_PAR)(inz))+MXW(m))/vpTmpWarpPnt___; \
+-  (outy) = (MYX(m)*((VP_PAR)(inx))+MYY(m)*((VP_PAR)(iny))+MYZ(m)*((VP_PAR)(inz))+MYW(m))/vpTmpWarpPnt___; \
+-  (outz) = MZX(m)*((VP_PAR)(inx))+MZY(m)*((VP_PAR)(iny))+MZZ(m)*((VP_PAR)(inz))+MZW(m); \
+-  if ((m).type==VP_MOTION_PROJ_3D) (outz)/=vpTmpWarpPnt___; } while (0)
++  VP_PAR vpTmpWarpPnt___= MWX(m)*(inx)+MWY(m)*(iny)+MWZ(m)*((VP_PAR)inz)+MWW(m); \
++  outx = (MXX(m)*((VP_PAR)inx)+MXY(m)*((VP_PAR)iny)+MXZ(m)*((VP_PAR)inz)+MXW(m))/vpTmpWarpPnt___; \
++  outy = (MYX(m)*((VP_PAR)inx)+MYY(m)*((VP_PAR)iny)+MYZ(m)*((VP_PAR)inz)+MYW(m))/vpTmpWarpPnt___; \
++  outz = MZX(m)*((VP_PAR)inx)+MZY(m)*((VP_PAR)iny)+MZZ(m)*((VP_PAR)inz)+MZW(m); \
++  if ((m).type==VP_MOTION_PROJ_3D) outz/=vpTmpWarpPnt___; } while (0)
+ 
+ /* Projections of each component */
+ #define VP_PROJW_3D(m,x,y,z,f)   ( MWX(m)*(x)+MWY(m)*(y)+MWZ(m)*(z)+MWW(m) )
+@@ -224,15 +224,15 @@ typedef struct {
+ 
+ /* Scale Down a matrix by Sfactor */
+ #define VP_SCALEDOWN(m,Sfactor) do { \
+-  MXW(m) /= (VP_PAR)(Sfactor); MWX(m) *= (VP_PAR)(Sfactor); \
+-  MYW(m) /= (VP_PAR)(Sfactor); MWY(m) *= (VP_PAR)(Sfactor); \
+-  MZW(m) /= (VP_PAR)(Sfactor); MWZ(m) *= (VP_PAR)(Sfactor); } while (0)
++  MXW(m) /= (VP_PAR)Sfactor; MWX(m) *= (VP_PAR)Sfactor; \
++  MYW(m) /= (VP_PAR)Sfactor; MWY(m) *= (VP_PAR)Sfactor; \
++  MZW(m) /= (VP_PAR)Sfactor; MWZ(m) *= (VP_PAR)Sfactor; } while (0)
+ 
+ /* Scale Up a matrix by Sfactor */
+ #define VP_SCALEUP(m,Sfactor) do { \
+-  MXW(m) *= (VP_PAR)(Sfactor); MWX(m) /= (VP_PAR)(Sfactor); \
+-  MYW(m) *= (VP_PAR)(Sfactor); MWY(m) /= (VP_PAR)(Sfactor); \
+-  MZW(m) *= (VP_PAR)(Sfactor); MWZ(m) /= (VP_PAR)(Sfactor); } while (0)
++  MXW(m) *= (VP_PAR)Sfactor; MWX(m) /= (VP_PAR)Sfactor; \
++  MYW(m) *= (VP_PAR)Sfactor; MWY(m) /= (VP_PAR)Sfactor; \
++  MZW(m) *= (VP_PAR)Sfactor; MWZ(m) /= (VP_PAR)Sfactor; } while (0)
+ 
+ /* Normalize the transformation matrix so that MWW is 1 */
+ #define VP_NORMALIZE(m) if (MWW(m)!=(VP_PAR)0.0) do { \
+@@ -253,15 +253,15 @@ typedef struct {
+ 
+ /* w' projection given a point x,y,0,f */
+ #define VP_PROJZ(m,x,y,f) ( \
+-    MWX(m)*((VP_PAR)(x))+MWY(m)*((VP_PAR)(y))+MWW(m)*((VP_PAR)(f)))
++    MWX(m)*((VP_PAR)x)+MWY(m)*((VP_PAR)y)+MWW(m)*((VP_PAR)f))
+ 
+ /* X Projection given a point x,y,0,f and w' */
+ #define VP_PROJX(m,x,y,w,f) (\
+-   (MXX(m)*((VP_PAR)(x))+MXY(m)*((VP_PAR)(y))+MXW(m)*((VP_PAR)(f)))/((VP_PAR)(w)))
++   (MXX(m)*((VP_PAR)x)+MXY(m)*((VP_PAR)y)+MXW(m)*((VP_PAR)f))/((VP_PAR)w))
+ 
+ /* Y Projection given a point x,y,0,f and the w' */
+ #define VP_PROJY(m,x,y,w,f) (\
+-  (MYX(m)*((VP_PAR)(x))+MYY(m)*((VP_PAR)(y))+MYW(m)*((VP_PAR)(f)))/((VP_PAR)(w)))
++  (MYX(m)*((VP_PAR)x)+MYY(m)*((VP_PAR)y)+MYW(m)*((VP_PAR)f))/((VP_PAR)w))
+ 
+ /* Set the reference id for a motion */
+ #define VP_SET_REFID(m,id) do { (m).refid=id; } while (0)
diff --git a/android_bench_suite/panorama_input/test_001.ppm b/android_bench_suite/panorama_input/test_001.ppm
new file mode 100644
index 0000000..e7218bf
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_001.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_002.ppm b/android_bench_suite/panorama_input/test_002.ppm
new file mode 100644
index 0000000..8975073
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_002.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_003.ppm b/android_bench_suite/panorama_input/test_003.ppm
new file mode 100644
index 0000000..58c9e34
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_003.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_004.ppm b/android_bench_suite/panorama_input/test_004.ppm
new file mode 100644
index 0000000..142c76b
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_004.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_005.ppm b/android_bench_suite/panorama_input/test_005.ppm
new file mode 100644
index 0000000..ff229d3
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_005.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_006.ppm b/android_bench_suite/panorama_input/test_006.ppm
new file mode 100644
index 0000000..2fc5c09
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_006.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_007.ppm b/android_bench_suite/panorama_input/test_007.ppm
new file mode 100644
index 0000000..d7f6a9a
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_007.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_008.ppm b/android_bench_suite/panorama_input/test_008.ppm
new file mode 100644
index 0000000..86d92b3
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_008.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_009.ppm b/android_bench_suite/panorama_input/test_009.ppm
new file mode 100644
index 0000000..72dd05f
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_009.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_010.ppm b/android_bench_suite/panorama_input/test_010.ppm
new file mode 100644
index 0000000..a09a054
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_010.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_011.ppm b/android_bench_suite/panorama_input/test_011.ppm
new file mode 100644
index 0000000..be7b61b
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_011.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_012.ppm b/android_bench_suite/panorama_input/test_012.ppm
new file mode 100644
index 0000000..67fad4a
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_012.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_013.ppm b/android_bench_suite/panorama_input/test_013.ppm
new file mode 100644
index 0000000..6d92fd1
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_013.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_014.ppm b/android_bench_suite/panorama_input/test_014.ppm
new file mode 100644
index 0000000..97aff41
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_014.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_015.ppm b/android_bench_suite/panorama_input/test_015.ppm
new file mode 100644
index 0000000..d1de251
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_015.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_016.ppm b/android_bench_suite/panorama_input/test_016.ppm
new file mode 100644
index 0000000..70ea1f5
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_016.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_017.ppm b/android_bench_suite/panorama_input/test_017.ppm
new file mode 100644
index 0000000..e075c9e
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_017.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_018.ppm b/android_bench_suite/panorama_input/test_018.ppm
new file mode 100644
index 0000000..adf023b
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_018.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_019.ppm b/android_bench_suite/panorama_input/test_019.ppm
new file mode 100644
index 0000000..1f27d1d
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_019.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_020.ppm b/android_bench_suite/panorama_input/test_020.ppm
new file mode 100644
index 0000000..fb95f52
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_020.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_021.ppm b/android_bench_suite/panorama_input/test_021.ppm
new file mode 100644
index 0000000..43baadf
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_021.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_022.ppm b/android_bench_suite/panorama_input/test_022.ppm
new file mode 100644
index 0000000..f928c83
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_022.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_023.ppm b/android_bench_suite/panorama_input/test_023.ppm
new file mode 100644
index 0000000..e21b275
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_023.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_024.ppm b/android_bench_suite/panorama_input/test_024.ppm
new file mode 100644
index 0000000..43ba0ba
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_024.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_025.ppm b/android_bench_suite/panorama_input/test_025.ppm
new file mode 100644
index 0000000..b9f8892
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_025.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_026.ppm b/android_bench_suite/panorama_input/test_026.ppm
new file mode 100644
index 0000000..201615f
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_026.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_027.ppm b/android_bench_suite/panorama_input/test_027.ppm
new file mode 100644
index 0000000..07cf426
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_027.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_028.ppm b/android_bench_suite/panorama_input/test_028.ppm
new file mode 100644
index 0000000..aedb023
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_028.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_029.ppm b/android_bench_suite/panorama_input/test_029.ppm
new file mode 100644
index 0000000..9a0d398
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_029.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_030.ppm b/android_bench_suite/panorama_input/test_030.ppm
new file mode 100644
index 0000000..26a8f53
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_030.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_031.ppm b/android_bench_suite/panorama_input/test_031.ppm
new file mode 100644
index 0000000..2300461
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_031.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_032.ppm b/android_bench_suite/panorama_input/test_032.ppm
new file mode 100644
index 0000000..f5e93f8
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_032.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_033.ppm b/android_bench_suite/panorama_input/test_033.ppm
new file mode 100644
index 0000000..c2f8ad9
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_033.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_034.ppm b/android_bench_suite/panorama_input/test_034.ppm
new file mode 100644
index 0000000..de93b23
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_034.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_035.ppm b/android_bench_suite/panorama_input/test_035.ppm
new file mode 100644
index 0000000..62198de
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_035.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_036.ppm b/android_bench_suite/panorama_input/test_036.ppm
new file mode 100644
index 0000000..bf252e4
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_036.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_037.ppm b/android_bench_suite/panorama_input/test_037.ppm
new file mode 100644
index 0000000..7cc7ace
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_037.ppm
Binary files differ
diff --git a/android_bench_suite/panorama_input/test_038.ppm b/android_bench_suite/panorama_input/test_038.ppm
new file mode 100644
index 0000000..d44e1f1
--- /dev/null
+++ b/android_bench_suite/panorama_input/test_038.ppm
Binary files differ
diff --git a/android_bench_suite/parse_result.py b/android_bench_suite/parse_result.py
new file mode 100644
index 0000000..90b3c4d
--- /dev/null
+++ b/android_bench_suite/parse_result.py
@@ -0,0 +1,114 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Helper functions to parse result collected from device"""
+
+from __future__ import print_function
+from fix_skia_results import _TransformBenchmarks
+
+import json
+
+def normalize(bench, dict_list):
+  bench_base = {
+      'Panorama': 1,
+      'Dex2oat': 1,
+      'Hwui': 10000,
+      'Skia': 1,
+      'Synthmark': 1,
+      'Binder': 0.001
+  }
+  result_dict = dict_list[0]
+  for key in result_dict:
+    result_dict[key] = result_dict[key] / bench_base[bench]
+  return [result_dict]
+
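+# A quick illustration of normalize() above (values are made up, purely for
+# illustration; they do not come from a real run):
+#   normalize('Hwui', [{'total_time_s': 25000.0, 'retval': 0}])
+#   => [{'total_time_s': 2.5, 'retval': 0}]   # divided by bench_base['Hwui']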
+
+# Functions to parse benchmark result for data collection.
+def parse_Panorama(bench, fin):
+  result_dict = {}
+  for line in fin:
+    words = line.split()
+    if 'elapsed' in words:
+      #TODO: Need to restructure the embedded word counts.
+      result_dict['total_time_s'] = float(words[3])
+      result_dict['retval'] = 0
+      return normalize(bench, [result_dict])
+  raise ValueError('You passed the right type of thing, '
+                   'but it didn\'t have the expected contents.')
+
+
+def parse_Synthmark(bench, fin):
+  result_dict = {}
+  accum = 0
+  cnt = 0
+  for line in fin:
+    words = line.split()
+    if 'normalized' in words:
+      #TODO: Need to restructure the embedded word counts.
+      accum += float(words[-1])
+      cnt += 1
+  if accum != 0:
+    result_dict['total_voices'] = accum / cnt
+    result_dict['retval'] = 0
+    return normalize(bench, [result_dict])
+  raise ValueError('You passed the right type of thing, '
+                   'but it didn\'t have the expected contents.')
+
+
+def parse_Binder(bench, fin):
+  result_dict = {}
+  accum = 0
+  cnt = 0
+  for line in fin:
+    words = line.split()
+    for word in words:
+      if 'average' in word:
+        #TODO: Need to restructure the embedded word counts.
+        accum += float(word[8:-2])
+        cnt += 1
+  if accum != 0:
+    result_dict['avg_time_ms'] = accum / cnt
+    result_dict['retval'] = 0
+    return normalize(bench, [result_dict])
+  raise ValueError('You passed the right type of thing, '
+                   'but it didn\'t have the expected contents.')
+
+
+def parse_Dex2oat(bench, fin):
+  result_dict = {}
+  cnt = 0
+  for line in fin:
+    words = line.split()
+    if 'elapsed' in words:
+      cnt += 1
+      #TODO: Need to restructure the embedded word counts.
+      if cnt == 1:
+        # First 'elapsed' time is for microbench 'Chrome'
+        result_dict['chrome_s'] = float(words[3])
+      elif cnt == 2:
+        # Second 'elapsed' time is for microbench 'Camera'
+        result_dict['camera_s'] = float(words[3])
+
+        result_dict['retval'] = 0
+        # Two results found, return
+        return normalize(bench, [result_dict])
+  raise ValueError('You passed the right type of thing, '
+                   'but it didn\'t have the expected contents.')
+
+
+def parse_Hwui(bench, fin):
+  result_dict = {}
+  for line in fin:
+    words = line.split()
+    if 'elapsed' in words:
+      #TODO: Need to restructure the embedded word counts.
+      result_dict['total_time_s'] = float(words[3])
+      result_dict['retval'] = 0
+      return normalize(bench, [result_dict])
+  raise ValueError('You passed the right type of thing, '
+                   'but it didn\'t have the expected contents.')
+
+
+def parse_Skia(bench, fin):
+  obj = json.load(fin)
+  return normalize(bench, _TransformBenchmarks(obj))
diff --git a/android_bench_suite/run.py b/android_bench_suite/run.py
new file mode 100755
index 0000000..55acb66
--- /dev/null
+++ b/android_bench_suite/run.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# pylint: disable=cros-logging-import
+
+# This is the script to run a specified benchmark with different toolchain
+# settings. It covers building the benchmark locally and running the
+# benchmark on the DUT.
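+#
+# A hypothetical invocation (the paths, flags and serial below are
+# placeholders for illustration, not taken from this change):
+#   ./run.py -b Hwui -c /path/to/clang/bin -f="-O3" -i 3 -s SERIAL1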
+
+"""Main script to run the benchmark suite from building to testing."""
+from __future__ import print_function
+
+import argparse
+import config
+import ConfigParser
+import logging
+import os
+import subprocess
+import sys
+
+logging.basicConfig(level=logging.INFO)
+
+def _parse_arguments(argv):
+  parser = argparse.ArgumentParser(description='Build and run specific '
+                                   'benchmark')
+  parser.add_argument(
+      '-b',
+      '--bench',
+      action='append',
+      default=[],
+      help='Select which benchmark to run')
+
+  # Only one of compiler directory and llvm prebuilts version can be specified
+  # at a time, so put -c and -l into a mutually exclusive group.
+  group = parser.add_mutually_exclusive_group()
+
+  # The toolchain setting arguments use the 'append' action, so that users
+  # can compare performance across several toolchain settings in one run.
+  group.add_argument(
+      '-c',
+      '--compiler_dir',
+      metavar='DIR',
+      action='append',
+      default=[],
+      help='Specify the path to the compiler\'s bin directory. '
+      'You may give several paths, each with its own -c, to '
+      'compare performance differences between '
+      'compilers.')
+
+  parser.add_argument(
+      '-o',
+      '--build_os',
+      action='append',
+      default=[],
+      help='Specify the host OS to build the benchmark.')
+
+  group.add_argument(
+      '-l',
+      '--llvm_prebuilts_version',
+      action='append',
+      default=[],
+      help='Specify the version of prebuilt LLVM. When a '
+      'specific prebuilt version of LLVM already '
+      'exists, there is no need to pass the path to the '
+      'compiler directory.')
+
+  parser.add_argument(
+      '-f',
+      '--cflags',
+      action='append',
+      default=[],
+      help='Specify the cflags options for the toolchain. '
+      'Be sure to quote all the cflags with quotation '
+      'marks ("") or use an equals sign (=).')
+  parser.add_argument(
+      '--ldflags',
+      action='append',
+      default=[],
+      help='Specify linker flags for the toolchain.')
+
+  parser.add_argument(
+      '-i',
+      '--iterations',
+      type=int,
+      default=1,
+      help='Specify how many iterations the test '
+      'takes.')
+
+  # Arguments -s and -r are for connecting to DUT.
+  parser.add_argument(
+      '-s',
+      '--serials',
+      help='Comma-separated list of device serials under '
+      'test.')
+
+  parser.add_argument(
+      '-r',
+      '--remote',
+      default='localhost',
+      help='hostname[:port] if the ADB device is connected '
+      'to a remote machine. Ensure this workstation '
+      'is configured for passwordless ssh access as '
+      'users "root" or "adb"')
+
+  # Arguments --frequency and -m are for device settings
+  parser.add_argument(
+      '--frequency',
+      type=int,
+      default=960000,
+      help='Specify the CPU frequency of the device. The '
+      'unit is kHz. The available values are defined in '
+      'the cpufreq/scaling_available_frequency file in '
+      'each core directory on the device. '
+      'The default value is 960000, which gives a good '
+      'balance between noise and performance. A lower '
+      'frequency will slow down the benchmark but '
+      'reduce noise.')
+
+  parser.add_argument(
+      '-m',
+      '--mode',
+      default='little',
+      help='Specify whether to use \'little\' or \'big\' '
+      'mode. The default is little mode. '
+      'Little mode runs on a single Cortex-A53 core, '
+      'while big mode runs on a single Cortex-A57 '
+      'core.')
+
+  # Configuration file for the benchmark test
+  parser.add_argument(
+      '-t',
+      '--test',
+      help='Specify the test settings with a configuration '
+      'file.')
+
+  # Whether to keep old json result or not
+  parser.add_argument(
+      '-k',
+      '--keep',
+      default='False',
+      help='Specify whether to keep the old json '
+      'results from the last run. This can be useful if you '
+      'want to compare performance differences across two '
+      'or more runs. Default is False (off).')
+
+  return parser.parse_args(argv)
+
+
+# Clear old log files in bench suite directory
+def clear_logs():
+  logging.info('Removing old logfiles...')
+  for f in ['build_log', 'device_log', 'test_log']:
+    logfile = os.path.join(config.bench_suite_dir, f)
+    try:
+      os.remove(logfile)
+    except OSError:
+      logging.info('No logfile %s needs to be removed. Ignored.', f)
+  logging.info('Old logfiles have been removed.')
+
+
+# Clear old json files in bench suite directory
+def clear_results():
+  logging.info('Clearing old json results...')
+  for bench in config.bench_list:
+    result = os.path.join(config.bench_suite_dir, bench + '.json')
+    try:
+      os.remove(result)
+    except OSError:
+      logging.info('No %s json file needs to be removed. Ignored.', bench)
+  logging.info('Old json results have been removed.')
+
+
+# Use subprocess.check_call to run another script, and write its logs to files
+def check_call_with_log(cmd, log_file):
+  log_file = os.path.join(config.bench_suite_dir, log_file)
+  with open(log_file, 'a') as logfile:
+    log_header = 'Log for command: %s\n' % (cmd)
+    logfile.write(log_header)
+    try:
+      subprocess.check_call(cmd, stdout=logfile)
+    except subprocess.CalledProcessError:
+      logging.error('Error running %s, please check %s for more info.', cmd,
+                    log_file)
+      raise
+  logging.info('Logs for %s are written to %s.', cmd, log_file)
+
+
+def set_device(serials, remote, frequency):
+  setting_cmd = [
+      os.path.join(
+          os.path.join(config.android_home, config.autotest_dir),
+          'site_utils/set_device.py')
+  ]
+  setting_cmd.append('-r=' + remote)
+  setting_cmd.append('-q=' + str(frequency))
+
+  # Deal with serials.
+  # If no serials are specified, try to run the test on the only device.
+  # If specified, split them into a list and run the test on each device.
+  if serials:
+    for serial in serials.split(','):
+      setting_cmd.append('-s=' + serial)
+      check_call_with_log(setting_cmd, 'device_log')
+      setting_cmd.pop()
+  else:
+    check_call_with_log(setting_cmd, 'device_log')
+
+  logging.info('CPU mode and frequency set successfully!')
+
+
+def log_ambiguous_args():
+  logging.error('The count of arguments does not match!')
+  raise ValueError('The count of arguments does not match.')
+
+
+# Check whether the counts of the build arguments are ambiguous.  The numbers
+# of -c/-l, -o, -f, and --ldflags settings should be either 0 or all the same.
+def check_count(compiler, llvm_version, build_os, cflags, ldflags):
+  # Count will be set to 0 if neither compiler nor llvm_version is specified.
+  # Otherwise, one of those two lists is empty (they are mutually exclusive),
+  # so count will be the length of the other one.
+  count = max(len(compiler), len(llvm_version))
+
+  # Check that the number of cflags is either 0 or the same as before.
+  if len(cflags) != 0:
+    if count != 0 and len(cflags) != count:
+      log_ambiguous_args()
+    count = len(cflags)
+
+  if len(ldflags) != 0:
+    if count != 0 and len(ldflags) != count:
+      log_ambiguous_args()
+    count = len(ldflags)
+
+  if len(build_os) != 0:
+    if count != 0 and len(build_os) != count:
+      log_ambiguous_args()
+    count = len(build_os)
+
+  # If no settings are passed, only run default once.
+  return max(1, count)
+
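+# A worked example of the counting rule above (hypothetical invocations, not
+# taken from this change):
+#   -c A -c B -f="-O2" -f="-O3"   -> count 2 (two settings are compared)
+#   -c A -f="-O2" -f="-O3"        -> mismatch, log_ambiguous_args() raises
+#   no -c/-l/-o/-f/--ldflags      -> count 1 (a single default build)
+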
+
+# Build benchmark binary with toolchain settings
+def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
+                ldflags):
+  # Build benchmark locally
+  build_cmd = ['./build_bench.py', '-b=' + bench]
+  if compiler:
+    build_cmd.append('-c=' + compiler[setting_no])
+  if llvm_version:
+    build_cmd.append('-l=' + llvm_version[setting_no])
+  if build_os:
+    build_cmd.append('-o=' + build_os[setting_no])
+  if cflags:
+    build_cmd.append('-f=' + cflags[setting_no])
+  if ldflags:
+    build_cmd.append('--ldflags=' + ldflags[setting_no])
+
+  logging.info('Building benchmark for toolchain setting No.%d...', setting_no)
+  logging.info('Command: %s', build_cmd)
+
+  try:
+    subprocess.check_call(build_cmd)
+  except:
+    logging.error('Error while building benchmark!')
+    raise
+
+
+def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):
+
+  # Run autotest script for benchmark on DUT
+  check_call_with_log(test_cmd, 'test_log')
+
+  logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
+               'device %s.', setting_no, i, serial)
+
+  # Rename results from the bench_result generated in autotest
+  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
+  if not os.path.exists(bench_result):
+    logging.error('No result found at %s, '
+                  'please check test_log for details.', bench_result)
+    raise OSError('Result file %s not found.' % bench_result)
+
+  new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial, setting_no, i)
+  new_bench_result_path = os.path.join(config.bench_suite_dir, new_bench_result)
+  try:
+    os.rename(bench_result, new_bench_result_path)
+  except OSError:
+    logging.error('Error while renaming raw result %s to %s', bench_result,
+                  new_bench_result_path)
+    raise
+
+  logging.info('Benchmark result saved at %s.', new_bench_result_path)
+
+
+def test_bench(bench, setting_no, iterations, serials, remote, mode):
+  logging.info('Start running benchmark on device...')
+
+  # Run benchmark and tests on DUT
+  for i in xrange(iterations):
+    logging.info('Iteration No.%d:', i)
+    test_cmd = [
+        os.path.join(
+            os.path.join(config.android_home, config.autotest_dir),
+            'site_utils/test_bench.py')
+    ]
+    test_cmd.append('-b=' + bench)
+    test_cmd.append('-r=' + remote)
+    test_cmd.append('-m=' + mode)
+
+    # Deal with serials.
+    # If no serials are specified, try to run the test on the only device.
+    # If specified, split them into a list and run the test on each device.
+    if serials:
+      for serial in serials.split(','):
+        test_cmd.append('-s=' + serial)
+
+        run_and_collect_result(test_cmd, setting_no, i, bench, serial)
+        test_cmd.pop()
+    else:
+      run_and_collect_result(test_cmd, setting_no, i, bench)
+
+
+def gen_json(bench, setting_no, iterations, serials):
+  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
+
+  logging.info('Generating JSON file for Crosperf...')
+
+  if not serials:
+    serials = 'default'
+
+  for serial in serials.split(','):
+
+    # Platform will be used as device lunch combo instead
+    #experiment = '_'.join([serial, str(setting_no)])
+    experiment = config.product_combo
+
+    # Input format: bench_result_{bench}_{serial}_{setting_no}_
+    input_file = '_'.join([bench_result, bench, serial, str(setting_no), ''])
+    gen_json_cmd = [
+        './gen_json.py', '--input=' + input_file,
+        '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
+        '--bench=' + bench, '--platform=' + experiment,
+        '--iterations=' + str(iterations)
+    ]
+
+    logging.info('Command: %s', gen_json_cmd)
+    if subprocess.call(gen_json_cmd):
+      logging.error('Error while generating JSON file, please check raw data '
+                    'of the results at %s.', input_file)
+
+
+def gen_crosperf(infile, outfile):
+  # Set environment variable for crosperf
+  os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)
+
+  logging.info('Generating Crosperf Report...')
+  crosperf_cmd = [
+      os.path.join(config.toolchain_utils, 'generate_report.py'),
+      '-i=' + infile, '-o=' + outfile, '-f'
+  ]
+
+  # Run crosperf generate_report.py
+  logging.info('Command: %s', crosperf_cmd)
+  subprocess.call(crosperf_cmd)
+
+  logging.info('Report generated successfully!')
+  logging.info('Report location: ' + outfile + '.html in the bench '
+               'suite directory.')
+
+
+def main(argv):
+  # Set environment variable for the local location of the benchmark suite.
+  # This is for collecting test results into the benchmark suite directory.
+  os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir
+
+  # Set Android type, used to handle the differences between aosp and internal.
+  os.environ['ANDROID_TYPE'] = config.android_type
+
+  # Set ANDROID_HOME for both building and testing.
+  os.environ['ANDROID_HOME'] = config.android_home
+
+  # Set environment variable for the architecture; this will be used in
+  # autotest.
+  os.environ['PRODUCT'] = config.product
+
+  arguments = _parse_arguments(argv)
+
+  bench_list = arguments.bench
+  if not bench_list:
+    bench_list = config.bench_list
+
+  compiler = arguments.compiler_dir
+  build_os = arguments.build_os
+  llvm_version = arguments.llvm_prebuilts_version
+  cflags = arguments.cflags
+  ldflags = arguments.ldflags
+  iterations = arguments.iterations
+  serials = arguments.serials
+  remote = arguments.remote
+  frequency = arguments.frequency
+  mode = arguments.mode
+  keep = arguments.keep
+
+  # Clear old logs every time before running the script
+  clear_logs()
+
+  if keep == 'False':
+    clear_results()
+
+  # Set test mode and frequency of CPU on the DUT
+  set_device(serials, remote, frequency)
+
+  test = arguments.test
+  # If a test configuration file has been given, use the build settings
+  # in the configuration file and run the test.
+  if test:
+    test_config = ConfigParser.ConfigParser(allow_no_value=True)
+    if not test_config.read(test):
+      logging.error('Error while reading from building '
+                    'configuration file %s.', test)
+      raise RuntimeError('Error while reading configuration file %s.' % test)
+
+    for setting_no, section in enumerate(test_config.sections()):
+      bench = test_config.get(section, 'bench')
+      compiler = [test_config.get(section, 'compiler')]
+      build_os = [test_config.get(section, 'build_os')]
+      llvm_version = [test_config.get(section, 'llvm_version')]
+      cflags = [test_config.get(section, 'cflags')]
+      ldflags = [test_config.get(section, 'ldflags')]
+
+      # Set iterations from the test_config file; if not set, use the value
+      # from the command line.
+      it = test_config.get(section, 'iterations')
+      if not it:
+        it = iterations
+      it = int(it)
+
+      # Build benchmark for each single test configuration
+      build_bench(0, bench, compiler, llvm_version, build_os, cflags, ldflags)
+
+      test_bench(bench, setting_no, it, serials, remote, mode)
+
+      gen_json(bench, setting_no, it, serials)
+
+    for bench in config.bench_list:
+      infile = os.path.join(config.bench_suite_dir, bench + '.json')
+      if os.path.exists(infile):
+        outfile = os.path.join(config.bench_suite_dir, bench + '_report')
+        gen_crosperf(infile, outfile)
+
+    # Stop the script if only a config file was provided
+    return 0
+
+  # If no configuration file is specified, continue running.
+  # Check whether the counts of the setting arguments are ambiguous.
+  setting_count = check_count(compiler, llvm_version, build_os, cflags, ldflags)
+
+  for bench in bench_list:
+    logging.info('Start building and running benchmark: [%s]', bench)
+    # Run the script for each toolchain setting
+    for setting_no in xrange(setting_count):
+      build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
+                  ldflags)
+
+      # Run autotest script for benchmark test on device
+      test_bench(bench, setting_no, iterations, serials, remote, mode)
+
+      gen_json(bench, setting_no, iterations, serials)
+
+    infile = os.path.join(config.bench_suite_dir, bench + '.json')
+    outfile = os.path.join(config.bench_suite_dir, bench + '_report')
+    gen_crosperf(infile, outfile)
+
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/android_bench_suite/set_flags.py b/android_bench_suite/set_flags.py
new file mode 100644
index 0000000..a243c7c
--- /dev/null
+++ b/android_bench_suite/set_flags.py
@@ -0,0 +1,128 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Helper functions to put user defined flags to mk/bp files"""
+
+from __future__ import print_function
+
+import config
+import os
+import subprocess
+
+
+# Find the makefile/blueprint based on the benchmark, and make a copy of
+# it for restoring later.
+def backup_file(bench, file_type):
+  mk_file = os.path.join(config.android_home, config.bench_dict[bench],
+                         'Android.' + file_type)
+  try:
+    # Make a copy of the makefile/blueprint so that we can recover it after
+    # building the benchmark
+    subprocess.check_call([
+        'cp', mk_file,
+        os.path.join(config.android_home, config.bench_dict[bench],
+                     'tmp_makefile')
+    ])
+  except subprocess.CalledProcessError:
+    raise OSError('Cannot back up Android.%s file for %s' % (file_type, bench))
+
+
+# Insert lines to add LOCAL_CFLAGS/LOCAL_LDFLAGS to the benchmark's
+# makefile/blueprint
+def replace_flags(bench, android_type, file_type, cflags, ldflags):
+  # Use format ["Flag1", "Flag2"] for bp file
+  if file_type == 'bp':
+    if cflags:
+      cflags = '\", \"'.join(cflags.split())
+    if ldflags:
+      ldflags = '\", \"'.join(ldflags.split())
+
+  if not cflags:
+    cflags = ''
+  else:
+    cflags = '\"' + cflags + '\",'
+  if not ldflags:
+    ldflags = ''
+  else:
+    ldflags = '\"' + ldflags + '\",'
+
+  # Two different diffs are used for aosp or internal android repo.
+  if android_type == 'aosp':
+    bench_diff = bench + '_flags_aosp.diff'
+  else:
+    bench_diff = bench + '_flags_internal.diff'
+
+  # Replace CFLAGS_FOR_BENCH_SUITE marker with proper cflags
+  output = ''
+  with open(bench_diff) as f:
+    for line in f:
+      line = line.replace('CFLAGS_FOR_BENCH_SUITE', cflags)
+      line = line.replace('LDFLAGS_FOR_BENCH_SUITE', ldflags)
+      output += line
+
+  with open('modified.diff', 'w') as f:
+    f.write(output)
+
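+# A small illustration of replace_flags() for a .bp file (the flags are made
+# up, purely for illustration): cflags '-O3 -flto' becomes the string
+# '"-O3", "-flto",', which replaces the CFLAGS_FOR_BENCH_SUITE marker in the
+# per-benchmark *_flags_*.diff template and is written out as modified.diff.
+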
+
+def apply_patches(bench):
+  bench_dir = os.path.join(config.android_home, config.bench_dict[bench])
+  bench_diff = 'modified.diff'
+  flags_patch = os.path.join(
+      os.path.dirname(os.path.realpath(__file__)), bench_diff)
+  try:
+    subprocess.check_call(['git', '-C', bench_dir, 'apply', flags_patch])
+  except subprocess.CalledProcessError:
+    raise OSError('Patch for adding flags for %s did not succeed.' % bench)
+
+
+def replace_flags_in_dir(bench, cflags, ldflags):
+  bench_mk = os.path.join(config.android_home, config.bench_dict[bench],
+                          'Android.mk')
+
+  if not cflags:
+    cflags = ''
+  if not ldflags:
+    ldflags = ''
+
+  output = ''
+  with open(bench_mk) as f:
+    for line in f:
+      line = line.replace('$(CFLAGS_FOR_BENCH_SUITE)', cflags)
+      line = line.replace('$(LDFLAGS_FOR_BENCH_SUITE)', ldflags)
+      output += line
+  with open(bench_mk, 'w') as f:
+    f.write(output)
+
+
+def add_flags_Panorama(cflags, ldflags):
+  backup_file('Panorama', 'mk')
+  replace_flags_in_dir('Panorama', cflags, ldflags)
+
+
+def add_flags_Synthmark(cflags, ldflags):
+  backup_file('Synthmark', 'mk')
+  replace_flags_in_dir('Synthmark', cflags, ldflags)
+
+
+def add_flags_Skia(cflags, ldflags):
+  backup_file('Skia', 'bp')
+  replace_flags('Skia', config.android_type, 'bp', cflags, ldflags)
+  apply_patches('Skia')
+
+
+def add_flags_Binder(cflags, ldflags):
+  backup_file('Binder', 'bp')
+  replace_flags('Binder', config.android_type, 'bp', cflags, ldflags)
+  apply_patches('Binder')
+
+
+def add_flags_Hwui(cflags, ldflags):
+  backup_file('Hwui', 'bp')
+  replace_flags('Hwui', config.android_type, 'bp', cflags, ldflags)
+  apply_patches('Hwui')
+
+
+def add_flags_Dex2oat(cflags, ldflags):
+  backup_file('Dex2oat', 'bp')
+  replace_flags('Dex2oat', config.android_type, 'bp', cflags, ldflags)
+  apply_patches('Dex2oat')
diff --git a/android_bench_suite/skia_aosp.diff b/android_bench_suite/skia_aosp.diff
new file mode 100644
index 0000000..269e02a
--- /dev/null
+++ b/android_bench_suite/skia_aosp.diff
@@ -0,0 +1,62 @@
+diff --git a/bench/ResultsWriter.h b/bench/ResultsWriter.h
+index f56deae..69a84c7 100644
+--- a/bench/ResultsWriter.h
++++ b/bench/ResultsWriter.h
+@@ -46,6 +46,9 @@ public:
+     // Record a single test metric.
+     virtual void metric(const char name[], double ms) {}
+ 
++    // Record a list of test metrics.
++    virtual void metrics(const char name[], const SkTArray<double> &array) {}
++
+     // Flush to storage now please.
+     virtual void flush() {}
+ };
+@@ -113,6 +116,17 @@ public:
+         SkASSERT(fConfig);
+         (*fConfig)[name] = ms;
+     }
++    void metrics(const char name[], const SkTArray<double> &array) override {
++        // The user who wrote this feature prefers NaNs over not having results.
++        // Hence, this ignores whether we have NaNs.
++        SkASSERT(fConfig);
++        Json::Value value = Json::Value(Json::arrayValue);
++        value.resize(array.count());
++        for (unsigned i = 0, e = array.count(); i != e; ++i) {
++          value[i] = array[i];
++        }
++        (*fConfig)[name] = value;
++    }
+ 
+     // Flush to storage now please.
+     void flush() override {
+diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
+index ae415fa..22011cd 100644
+--- a/bench/nanobench.cpp
++++ b/bench/nanobench.cpp
+@@ -42,6 +42,7 @@
+ #include "SkSurface.h"
+ #include "SkTaskGroup.h"
+ #include "SkThreadUtils.h"
++#include "SkTypes.h"
+ #include "ThermalManager.h"
+ 
+ #include <stdlib.h>
+@@ -1173,7 +1174,7 @@ int nanobench_main() {
+             target->setup();
+             bench->perCanvasPreDraw(canvas);
+ 
+-            int maxFrameLag;
++            int maxFrameLag = 0;
+             int loops = target->needsFrameTiming(&maxFrameLag)
+                 ? setup_gpu_bench(target, bench.get(), maxFrameLag)
+                 : setup_cpu_bench(overhead, target, bench.get());
+@@ -1197,6 +1198,7 @@ int nanobench_main() {
+             benchStream.fillCurrentOptions(log.get());
+             target->fillOptions(log.get());
+             log->metric("min_ms",    stats.min);
+             log->metric("median_ms", stats.median);
++            log->metrics("samples",    samples);
+ #if SK_SUPPORT_GPU
+             if (gpuStatsDump) {
+                 // dump to json, only SKPBench currently returns valid keys / values
diff --git a/android_bench_suite/skia_internal.diff b/android_bench_suite/skia_internal.diff
new file mode 100644
index 0000000..f6b1961
--- /dev/null
+++ b/android_bench_suite/skia_internal.diff
@@ -0,0 +1,61 @@
+diff --git a/bench/ResultsWriter.h b/bench/ResultsWriter.h
+index f56deae..69a84c7 100644
+--- a/bench/ResultsWriter.h
++++ b/bench/ResultsWriter.h
+@@ -46,6 +46,9 @@ public:
+     // Record a single test metric.
+     virtual void metric(const char name[], double ms) {}
+ 
++    // Record a list of test metrics.
++    virtual void metrics(const char name[], const SkTArray<double> &array) {}
++
+     // Flush to storage now please.
+     virtual void flush() {}
+ };
+@@ -113,6 +116,17 @@ public:
+         SkASSERT(fConfig);
+         (*fConfig)[name] = ms;
+     }
++    void metrics(const char name[], const SkTArray<double> &array) override {
++        // The user who wrote this feature prefers NaNs over not having results.
++        // Hence, this ignores whether we have NaNs.
++        SkASSERT(fConfig);
++        Json::Value value = Json::Value(Json::arrayValue);
++        value.resize(array.count());
++        for (unsigned i = 0, e = array.count(); i != e; ++i) {
++          value[i] = array[i];
++        }
++        (*fConfig)[name] = value;
++    }
+ 
+     // Flush to storage now please.
+     void flush() override {
+diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
+index 0651302..0623d61 100644
+--- a/bench/nanobench.cpp
++++ b/bench/nanobench.cpp
+@@ -43,6 +43,7 @@
+ #include "SkSVGDOM.h"
+ #include "SkTaskGroup.h"
+ #include "SkThreadUtils.h"
++#include "SkTypes.h"
+ #include "ThermalManager.h"
+ #include "SkScan.h"
+ 
+@@ -1240,7 +1241,7 @@ int nanobench_main() {
+             target->setup();
+             bench->perCanvasPreDraw(canvas);
+ 
+-            int maxFrameLag;
++            int maxFrameLag = 0;
+             int loops = target->needsFrameTiming(&maxFrameLag)
+                 ? setup_gpu_bench(target, bench.get(), maxFrameLag)
+                 : setup_cpu_bench(overhead, target, bench.get());
+@@ -1290,6 +1291,7 @@ int nanobench_main() {
+             benchStream.fillCurrentOptions(log.get());
+             target->fillOptions(log.get());
+             log->metric("min_ms",    stats.min);
++            log->metrics("samples",  samples);
+ #if SK_SUPPORT_GPU
+             if (gpuStatsDump) {
+                 // dump to json, only SKPBench currently returns valid keys / values
diff --git a/android_bench_suite/synthmark.diff b/android_bench_suite/synthmark.diff
new file mode 100644
index 0000000..db87cb9
--- /dev/null
+++ b/android_bench_suite/synthmark.diff
@@ -0,0 +1,22 @@
+diff --git a/Android.mk b/Android.mk
+index e1d89db..3970857 100644
+--- a/Android.mk
++++ b/Android.mk
+@@ -1 +1,16 @@
+-# This file is intentionally empty, to prevent a platform build from descending further
++# Copyright 2017 The Chromium OS Authors. All rights reserved.
++# Use of this source code is governed by a BSD-style license that can be
++# found in the LICENSE file.
++
++LOCAL_PATH := $(call my-dir)
++
++include $(CLEAR_VARS)
++LOCAL_MODULE_TAGS := tests
++LOCAL_C_INCLUDES := $(LOCAL_PATH)/source
++LOCAL_SRC_FILES:= apps/synthmark.cpp
++LOCAL_CFLAGS += -g -std=c++11 -Ofast
++LOCAL_CFLAGS += $(CFLAGS_FOR_BENCH_SUITE)
++LOCAL_LDFLAGS += $(LDFLAGS_FOR_BENCH_SUITE)
++#LOCAL_SHARED_LIBRARIES := libcutils libutils
++LOCAL_MODULE := synthmark
++include $(BUILD_EXECUTABLE)
diff --git a/android_bench_suite/test_config b/android_bench_suite/test_config
new file mode 100644
index 0000000..ae2cff2
--- /dev/null
+++ b/android_bench_suite/test_config
@@ -0,0 +1,57 @@
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[Hwui_Test]
+bench = Hwui
+compiler =
+build_os =
+llvm_version =
+cflags =
+ldflags =
+iterations = 1
+
+[Skia_Test]
+bench = Skia
+compiler =
+build_os =
+llvm_version =
+cflags =
+ldflags =
+iterations = 1
+
+[Synthmark_Test]
+bench = Synthmark
+compiler =
+build_os =
+llvm_version =
+cflags =
+ldflags =
+iterations = 1
+
+[Binder_Test]
+bench = Binder
+compiler =
+build_os =
+llvm_version =
+cflags =
+ldflags =
+iterations = 1
+
+[Panorama_Test]
+bench = Panorama
+compiler =
+build_os =
+llvm_version =
+cflags =
+ldflags =
+iterations = 1
+
+[Dex2oat_Test]
+bench = Dex2oat
+compiler =
+build_os =
+llvm_version =
+cflags =
+ldflags =
+iterations = 1
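+
+# For reference, a filled-in section might look like the following; all the
+# values below are placeholders for illustration, not part of this change:
+#
+# [Hwui_Clang_O3]
+# bench = Hwui
+# compiler = /path/to/clang/bin
+# build_os = linux-x86
+# llvm_version =
+# cflags = -O3
+# ldflags =
+# iterations = 3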
diff --git a/binary_search_tool/README.testing b/binary_search_tool/README.testing
new file mode 100644
index 0000000..6c81ab9
--- /dev/null
+++ b/binary_search_tool/README.testing
@@ -0,0 +1,80 @@
+This file explains how to set up and run the various kinds of bisection tests.
+
+The bisection tool comes with several sets of tests which you should
+run after updating any of the bisection tool scripts OR after updating
+the Android compiler wrapper (to make sure the wrapper will still work
+correctly with bisection).
+
+Before you start.
+----------------
+
+Before you can run the tests, your PYTHONPATH environment variable
+must be correct.  This means that it must include both the
+toolchain-utils directory and the binary_search_tool directory.  The
+easiest way to set it is:
+
+$ cd toolchain-utils
+$ export PYTHONPATH=`pwd`:${PYTHONPATH}
+$ cd binary_search_tool
+$ export PYTHONPATH=`pwd`:${PYTHONPATH}
+
+
+Running the unittests.
+----------------------
+
+To run the basic unit tests:
+
+$ cd toolchain-utils/binary_search_tool/test
+$ ./binary_search_tool_tester.py
+
+Running the bisection tests, testing the compiler wrapper.
+----------------------------------------------------------
+
+If you want to run the bisection tests, and test the compiler wrapper
+(to make sure the POPULATE_GOOD and POPULATE_BAD stages are still
+working properly) you can do the following.
+
+If you are testing with the ANDROID COMPILER WRAPPER, you need to do some
+preliminary setup:
+
+Set up the compiler wrapper to replace GCC:
+
+    $ cd <android-root>/prebuilts/clang/host/linux-x86/clang-368880/bin
+    $ cp clang gcc
+    $ whereis gcc
+    gcc: /usr/bin/gcc /usr/lib/gcc /usr/bin/X11/gcc /usr/share/man/man1/gcc.1.gz
+    $ cd /usr/bin
+    $ ls -l gcc
+    lrwxrwxrwx 1 root root 7 Feb  3 17:00 gcc -> gcc-4.8*
+    $ sudo mv gcc gcc.real
+    $ sudo ln -s <android-root>/prebuilts/clang/host/linux-x86/clang-3688880/bin/gcc gcc
+
+Move to the correct directory, then run the test script:
+
+    $ cd toolchain-utils/binary_search_tool
+    $ ./run_bisect_test.py
+
+
+If you are testing with the CHROMEOS COMPILER WRAPPER, you MUST run the
+tests from INSIDE your CHROOT (but you don't need to do any special setup):
+
+    $ cd <path-to-chromeos-root>
+    $ cros_sdk
+    $ cd ~/trunk/src/third_party/toolchain-utils
+
+    Set up your PYTHONPATH:
+
+    $ export PYTHONPATH=`pwd`:${PYTHONPATH}
+    $ cd binary_search_tool
+    $ export PYTHONPATH=`pwd`:${PYTHONPATH}
+
+    Run the test script:
+
+    $ ./run_bisect_test.py
+
+
+Running the bisection tests, without testing the compiler wrapper.
+------------------------------------------------------------------
+
+$ cd toolchain-utils/binary_search_tool
+$ ./full_bisect_test/run-test-nowrapper.sh
diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py
index 7ac2fba..aaa09ee 100755
--- a/binary_search_tool/binary_search_perforce.py
+++ b/binary_search_tool/binary_search_perforce.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Module of binary serch for perforce."""
 from __future__ import print_function
 
@@ -368,8 +368,8 @@
     self.CleanupCLs()
     # Change the revision of only the gcc part of the toolchain.
     command = ('cd %s/gcctools/google_vendor_src_branch/gcc '
-               '&& g4 revert ...; g4 sync @%s' %
-               (self.checkout_dir, current_revision))
+               '&& g4 revert ...; g4 sync @%s' % (self.checkout_dir,
+                                                  current_revision))
     self.current_ce.RunCommand(command)
 
     self.HandleBrokenCLs(current_revision)
@@ -402,11 +402,13 @@
       '-s', '--script', dest='script', help='Script to run for every version.')
   options = parser.parse_args(argv)
   # First get all revisions
-  p4_paths = ['//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...',
-              '//depot2/gcctools/google_vendor_src_branch/binutils/'
-              'binutils-2.20.1-mobile/...',
-              '//depot2/gcctools/google_vendor_src_branch/'
-              'binutils/binutils-20100303/...']
+  p4_paths = [
+      '//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...',
+      '//depot2/gcctools/google_vendor_src_branch/binutils/'
+      'binutils-2.20.1-mobile/...',
+      '//depot2/gcctools/google_vendor_src_branch/'
+      'binutils/binutils-20100303/...'
+  ]
   p4gccbs = P4GCCBinarySearcher('perforce2:2666', p4_paths, '')
 
   # Main loop:
@@ -425,8 +427,8 @@
       ce = command_executer.GetCommandExecuter()
       command = '%s %s' % (script, p4gccbs.checkout_dir)
       status = ce.RunCommand(command)
-      message = ('Revision: %s produced: %d status\n' %
-                 (current_revision, status))
+      message = ('Revision: %s produced: %d status\n' % (current_revision,
+                                                         status))
       logger.GetLogger().LogOutput(message, print_to_console=verbose)
       terminated = p4gccbs.SetStatus(status)
       num_tries -= 1
diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py
index a10e90b..1906525 100755
--- a/binary_search_tool/binary_search_state.py
+++ b/binary_search_tool/binary_search_state.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """The binary search wrapper."""
 
 from __future__ import print_function
@@ -559,8 +559,8 @@
   try:
     bss.DoSearch()
     bss.RemoveState()
-    logger.GetLogger().LogOutput('Total execution time: %s' %
-                                 bss.ElapsedTimeString())
+    logger.GetLogger().LogOutput(
+        'Total execution time: %s' % bss.ElapsedTimeString())
   except Error as e:
     logger.GetLogger().LogError(e)
     return 1
diff --git a/binary_search_tool/bisect.py b/binary_search_tool/bisect.py
index d5a8b71..c7dd523 100755
--- a/binary_search_tool/bisect.py
+++ b/binary_search_tool/bisect.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """The unified package/object bisecting tool."""
 
 from __future__ import print_function
@@ -193,9 +193,9 @@
     if options.dir:
       os.environ['BISECT_DIR'] = options.dir
     self.options.dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
-    self.setup_cmd = ('%s %s %s %s' % (self.sysroot_wrapper_setup,
-                                       self.options.board, self.options.remote,
-                                       self.options.package))
+    self.setup_cmd = ('%s %s %s %s' %
+                      (self.sysroot_wrapper_setup, self.options.board,
+                       self.options.remote, self.options.package))
 
     self.ArgOverride(self.default_kwargs, overrides)
 
diff --git a/binary_search_tool/common/interactive_test_noping.sh b/binary_search_tool/common/interactive_test_noping.sh
new file mode 100755
index 0000000..bb01b95
--- /dev/null
+++ b/binary_search_tool/common/interactive_test_noping.sh
@@ -0,0 +1,27 @@
+#!/bin/bash -u
+#
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# This script asks the user if the image is good or not, allowing the user to
+# conduct whatever tests the user wishes, and waiting for a response.
+#
+# This script is intended to be used by binary_search_state.py, as
+# part of the binary search triage on ChromeOS package and object files. It
+# waits for the test setup script to build and install the image, then asks the
+# user if the image is good or not. It should return '0' if the test succeeds
+# (the image is 'good'); '1' if the test fails (the image is 'bad'); and '125'
+# if it could not determine (does not apply in this case).
+#
+
+source common/common.sh
+
+while true; do
+    read -p "Is this a good ChromeOS image?" yn
+    case $yn in
+        [Yy]* ) exit 0;;
+        [Nn]* ) exit 1;;
+        * ) echo "Please answer yes or no.";;
+    esac
+done
+
+exit 125
diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py
index 3d6403a..a6d189b 100755
--- a/binary_search_tool/compiler_wrapper.py
+++ b/binary_search_tool/compiler_wrapper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Prototype compiler wrapper.
 
 Only tested with: gcc, g++, clang, clang++
diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py
index 32a1f16..ed4eab6 100755
--- a/binary_search_tool/cros_pkg/create_cleanup_script.py
+++ b/binary_search_tool/cros_pkg/create_cleanup_script.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 #  Copyright 2015 Google Inc. All Rights Reserved
 """The script to generate a cleanup script after setup.sh.
@@ -90,8 +90,8 @@
       if options.renamed_tree:
         # Old build tree existed and was a real tree, so it got
         # renamed.  Move the renamed tree back to the original tree.
-        out_file.write('sudo mv /build/%s.save /build/%s\n' %
-                       (options.board, options.board))
+        out_file.write('sudo mv /build/%s.save /build/%s\n' % (options.board,
+                                                               options.board))
       else:
         # Old tree existed and was already a soft link.  Re-create the
         # original soft link.
diff --git a/binary_search_tool/cros_pkg/interactive_test_noping.sh b/binary_search_tool/cros_pkg/interactive_test_noping.sh
new file mode 120000
index 0000000..c76f940
--- /dev/null
+++ b/binary_search_tool/cros_pkg/interactive_test_noping.sh
@@ -0,0 +1 @@
+../common/interactive_test_noping.sh
\ No newline at end of file
diff --git a/binary_search_tool/cros_pkg/test_setup_usb.sh b/binary_search_tool/cros_pkg/test_setup_usb.sh
new file mode 100755
index 0000000..fec66f8
--- /dev/null
+++ b/binary_search_tool/cros_pkg/test_setup_usb.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# This is a generic ChromeOS package/image test setup script. It is meant to
+# be used for the package bisection tool, in particular when there is a booting
+# issue with the image, so the image MUST be 'flashed' via USB.
+#
+# This script is intended to be used by binary_search_state.py, as
+# part of the binary search triage on ChromeOS objects and packages. It should
+# return '0' if the setup succeeds; and '1' if the setup fails (the image
+# could not built or be flashed).
+#
+
+export PYTHONUNBUFFERED=1
+
+source common/common.sh
+
+echo "BUILDING IMAGE"
+pushd ~/trunk/src/scripts
+./build_image test --board=${BISECT_BOARD} --noenable_rootfs_verification --noeclean
+build_status=$?
+popd
+
+if [[ ${build_status} -eq 0 ]] ; then
+    echo
+    echo "INSTALLING IMAGE VIA USB (requires some manual intervention)"
+    echo
+    echo "Insert a usb stick into the current machine"
+    echo "Note: The cros flash will take time and doesn't give much output."
+    echo "      Be patient. If your usb access light is flashing it's working."
+    sleep 1
+    read -p "Press enter to continue" notused
+
+    cros flash --board=${BISECT_BOARD} --clobber-stateful usb:// ~/trunk/src/build/images/${BISECT_BOARD}/latest/chromiumos_test_image.bin
+
+    echo
+    echo "Flash to usb complete!"
+    echo "Plug the usb into your chromebook and install the image."
+    echo "Refer to the ChromiumOS Developer's Handbook for more details."
+    echo "http://www.chromium.org/chromium-os/developer-guide#TOC-Boot-from-your-USB-disk"
+    while true; do
+      sleep 1
+      read -p "Was the installation of the image successful? " choice
+      case $choice in
+        [Yy]*) exit 0;;
+        [Nn]*) exit 1;;
+        *) echo "Please answer y or n.";;
+      esac
+    done
+else
+    echo "build_image returned a non-zero status: ${build_status}"
+    exit 1
+fi
+
+exit 0
diff --git a/binary_search_tool/full_bisect_test/bad-objects-permanent/_LIST b/binary_search_tool/full_bisect_test/bad-objects-permanent/_LIST
new file mode 100644
index 0000000..07bc1aa
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/bad-objects-permanent/_LIST
@@ -0,0 +1,7 @@
+build.o
+inorder_norecurse.o
+inorder.o
+main.o
+preorder_norecurse.o
+preorder.o
+stack.o
diff --git a/binary_search_tool/full_bisect_test/bad-output-1.txt b/binary_search_tool/full_bisect_test/bad-output-1.txt
new file mode 100644
index 0000000..dfd0bfc
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/bad-output-1.txt
@@ -0,0 +1,11 @@
+pre-order traversal, with recursion: 
+35 28 20 25 23 26 30 60 70 65 64 68 
+
+pre-order traversal, without recursion: 
+35 28 20 25 23 26 30 60 70 65 64 68 
+
+in-order traversal, with recursion: 
+20 23 25 26 28 30 35 60 64 65 68 70 
+
+in-order traversal, without recursion: 
+28 30 35 60 65 68 70 
diff --git a/binary_search_tool/full_bisect_test/bad-output-2.txt b/binary_search_tool/full_bisect_test/bad-output-2.txt
new file mode 100644
index 0000000..e35ebdd
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/bad-output-2.txt
@@ -0,0 +1,11 @@
+pre-order traversal, with recursion: 
+35 28 20 25 23 26 30 60 70 65 64 68 
+
+pre-order traversal, without recursion: 
+35 60 70 
+
+in-order traversal, with recursion: 
+20 23 25 26 28 30 35 60 64 65 68 70 
+
+in-order traversal, without recursion: 
+20 23 25 26 28 30 35 60 64 65 68 70 
diff --git a/binary_search_tool/full_bisect_test/bad-output-3.txt b/binary_search_tool/full_bisect_test/bad-output-3.txt
new file mode 100644
index 0000000..5f3bfef
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/bad-output-3.txt
@@ -0,0 +1,11 @@
+pre-order traversal, with recursion: 
+35 28 20 25 23 26 30 60 70 65 64 68 
+
+pre-order traversal, without recursion: 
+35 60 70 
+
+in-order traversal, with recursion: 
+20 23 25 26 28 30 35 60 64 65 68 70 
+
+in-order traversal, without recursion: 
+28 30 35 60 65 68 70 
diff --git a/binary_search_tool/full_bisect_test/bin-trees.h b/binary_search_tool/full_bisect_test/bin-trees.h
new file mode 100644
index 0000000..1c4fa19
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/bin-trees.h
@@ -0,0 +1,29 @@
+#ifndef _BIN_TREES_H
+#define _BIN_TREES_H
+
+
+struct bin_tree_struct {
+  int data;
+  char c_data;
+  struct bin_tree_struct *left;
+  struct bin_tree_struct *right;
+};
+
+typedef struct bin_tree_struct * tree_ptr;
+
+
+struct stack_struct {
+  tree_ptr data;
+  struct stack_struct *next;
+};
+
+
+void search_tree_insert (tree_ptr *, int);
+void pre_order_traverse (tree_ptr);
+void pre_order_traverse_no_recurse (tree_ptr);
+void in_order_traverse (tree_ptr);
+void in_order_traverse_no_recurse (tree_ptr);
+void push (struct stack_struct **, tree_ptr);
+tree_ptr pop (struct stack_struct **);
+
+#endif /* _BIN_TREES_H */
diff --git a/binary_search_tool/full_bisect_test/build.c b/binary_search_tool/full_bisect_test/build.c
new file mode 100644
index 0000000..ea1c8b4
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/build.c
@@ -0,0 +1,23 @@
+#include <stdlib.h>
+#include "bin-trees.h"
+
+tree_ptr
+new_node (int value)
+{
+  tree_ptr node = (tree_ptr) malloc (sizeof (tree_ptr));
+  node->data = value;
+  node->left = NULL;
+  node->right = NULL;
+  return node;
+}
+
+void
+search_tree_insert (tree_ptr *root, int value)
+{
+  if (*root == NULL)
+    *root = new_node (value);
+  else if (value < (*root)->data)
+    search_tree_insert (&((*root)->left), value);
+  else if (value > (*root)->data)
+    search_tree_insert (&((*root)->right), value);
+}
diff --git a/binary_search_tool/full_bisect_test/build.sh b/binary_search_tool/full_bisect_test/build.sh
new file mode 100755
index 0000000..9d40fb5
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# This file compiles all the source files into .o files, then links them to form
+# the test binary, 'bin-trees'.  The .o files all go into the 'work' directory.
+# There are 'good' and 'bad' versions of inorder_norecurse and preorder_norecurse
+# (e.g. inorder_norecurse.c.good and inorder_norecurse.c.bad).  This script
+# assumes that the desired versions of those files have been copied into
+# inorder_norecurse.c and preorder_norecurse.c.  The script files
+# make_sources_good.sh and make_sources_bad.sh are meant to handle this.
+#
+#  This script is meant to be run directly in the full_bisect_test directory.
+#  Most other scripts assume they are being run from the parent directory.
+
+gcc -c build.c -o work/build.o
+gcc -c preorder.c -o work/preorder.o
+gcc -c inorder.c -o work/inorder.o
+gcc -c main.c -o work/main.o
+gcc -c stack.c -o work/stack.o 
+gcc -c preorder_norecurse.c -o work/preorder_norecurse.o
+gcc -c inorder_norecurse.c -o work/inorder_norecurse.o
+gcc -o bin-trees work/main.o work/preorder.o work/inorder.o work/build.o work/preorder_norecurse.o work/inorder_norecurse.o work/stack.o
+
diff --git a/binary_search_tool/full_bisect_test/chromeos_build.sh b/binary_search_tool/full_bisect_test/chromeos_build.sh
new file mode 100755
index 0000000..f072bd0
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/chromeos_build.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# This file compiles all the source files into .o files, then links them to form
+# the test binary, 'bin-trees'.  The .o files all go into the 'work' directory.
+# There are 'good' and 'bad' versions of inorder_norecurse and preorder_norecurse
+# (e.g. inorder_norecurse.c.good and inorder_norecurse.c.bad).  This script
+# assumes that the desired versions of those files have been copied into
+# inorder_norecurse.c and preorder_norecurse.c.  The script files
+# make_sources_good.sh and make_sources_bad.sh are meant to handle this.
+#
+#  This script is meant to be run directly in the full_bisect_test directory.
+#  Most other scripts assume they are being run from the parent directory.
+
+x86_64-cros-linux-gnu-gcc -c build.c -o work/build.o
+x86_64-cros-linux-gnu-gcc -c preorder.c -o work/preorder.o
+x86_64-cros-linux-gnu-gcc -c inorder.c -o work/inorder.o
+x86_64-cros-linux-gnu-gcc -c main.c -o work/main.o
+x86_64-cros-linux-gnu-gcc -c stack.c -o work/stack.o 
+x86_64-cros-linux-gnu-gcc -c preorder_norecurse.c -o work/preorder_norecurse.o
+x86_64-cros-linux-gnu-gcc -c inorder_norecurse.c -o work/inorder_norecurse.o
+x86_64-cros-linux-gnu-gcc -o bin-trees work/main.o work/preorder.o work/inorder.o work/build.o work/preorder_norecurse.o work/inorder_norecurse.o work/stack.o
diff --git a/binary_search_tool/full_bisect_test/cleanup.sh b/binary_search_tool/full_bisect_test/cleanup.sh
new file mode 100755
index 0000000..48b44f3
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/cleanup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# In keeping with the normal way of doing bisection, this script is meant to
+# remove files specific to the particular run of the bisector.
+#
+# This file is called from main-bisect-test.sh
+
+rm full_bisect_test/common.sh
diff --git a/binary_search_tool/full_bisect_test/get_initial_items.sh b/binary_search_tool/full_bisect_test/get_initial_items.sh
new file mode 100755
index 0000000..4c4043f
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/get_initial_items.sh
@@ -0,0 +1,9 @@
+#!/bin/bash -u
+#
+# This is one of the test scripts that needs to be passed to
+# binary_search_state.py.
+
+source full_bisect_test/common.sh
+
+cat ${BISECT_GOOD_BUILD}/_LIST
+
diff --git a/binary_search_tool/full_bisect_test/good-objects-permanent/_LIST b/binary_search_tool/full_bisect_test/good-objects-permanent/_LIST
new file mode 100644
index 0000000..07bc1aa
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/good-objects-permanent/_LIST
@@ -0,0 +1,7 @@
+build.o
+inorder_norecurse.o
+inorder.o
+main.o
+preorder_norecurse.o
+preorder.o
+stack.o
diff --git a/binary_search_tool/full_bisect_test/good-output.txt b/binary_search_tool/full_bisect_test/good-output.txt
new file mode 100644
index 0000000..4db15eb
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/good-output.txt
@@ -0,0 +1,11 @@
+pre-order traversal, with recursion: 
+35 28 20 25 23 26 30 60 70 65 64 68 
+
+pre-order traversal, without recursion: 
+35 28 20 25 23 26 30 60 70 65 64 68 
+
+in-order traversal, with recursion: 
+20 23 25 26 28 30 35 60 64 65 68 70 
+
+in-order traversal, without recursion: 
+20 23 25 26 28 30 35 60 64 65 68 70 
diff --git a/binary_search_tool/full_bisect_test/inorder.c b/binary_search_tool/full_bisect_test/inorder.c
new file mode 100644
index 0000000..ad093f3
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/inorder.c
@@ -0,0 +1,22 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "bin-trees.h"
+
+static void
+real_inorder (tree_ptr root)
+{
+  if (root == NULL)
+    return;
+
+  real_inorder (root->left);
+  printf ("%d ", root->data);
+  real_inorder (root->right);
+}
+
+void
+in_order_traverse (tree_ptr root)
+{
+  printf ("in-order traversal, with recursion: \n");
+  real_inorder (root);
+  printf ("\n");
+}
diff --git a/binary_search_tool/full_bisect_test/inorder_norecurse.c.bad b/binary_search_tool/full_bisect_test/inorder_norecurse.c.bad
new file mode 100644
index 0000000..27f0bb1
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/inorder_norecurse.c.bad
@@ -0,0 +1,42 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include "bin-trees.h"
+
+static void
+real_in_order_traverse_no_recurse (tree_ptr root)
+{
+  struct stack_struct *stack = NULL;
+  tree_ptr current = root;
+  int going_left = 1;   /* boolean variable */
+  while (current != NULL)
+  {
+    if ((current->left != NULL) && going_left)
+    {
+      push (&stack, current);
+      current = current->left;
+    }
+
+    printf ("%d ", current->data);
+    if (current->right)
+    {
+      current = current->right;
+      going_left = 1;
+    }
+    else if (stack != NULL)
+    {
+      current = pop(&stack);
+      going_left = 0;
+    }
+    else
+      current = NULL;
+  }
+}
+
+void
+in_order_traverse_no_recurse (tree_ptr root)
+{
+  printf ("in-order traversal, without recursion: \n");
+  real_in_order_traverse_no_recurse (root);
+  printf ("\n");
+  return;
+}
diff --git a/binary_search_tool/full_bisect_test/inorder_norecurse.c.good b/binary_search_tool/full_bisect_test/inorder_norecurse.c.good
new file mode 100644
index 0000000..a03f481
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/inorder_norecurse.c.good
@@ -0,0 +1,42 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include "bin-trees.h"
+
+static void
+real_in_order_traverse_no_recurse (tree_ptr root)
+{
+  struct stack_struct *stack = NULL;
+  tree_ptr current = root;
+  int going_left = 1;   /* boolean variable */
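+  /* Walk down the left spine, stacking ancestors; visit the node, then either
+     descend into its right subtree or pop the next unvisited ancestor.  The
+     going_left flag prevents re-descending a left subtree after a pop. */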
+  while (current != NULL)
+  {
+    while ((current->left != NULL) && going_left)
+    {
+      push (&stack, current);
+      current = current->left;
+    }
+
+    printf ("%d ", current->data);
+    if (current->right)
+    {
+      current = current->right;
+      going_left = 1;
+    }
+    else if (stack != NULL)
+    {
+      current = pop(&stack);
+      going_left = 0;
+    }
+    else
+      current = NULL;
+  }
+}
+
+void
+in_order_traverse_no_recurse (tree_ptr root)
+{
+  printf ("in-order traversal, without recursion: \n");
+  real_in_order_traverse_no_recurse (root);
+  printf ("\n");
+  return;
+}
diff --git a/binary_search_tool/full_bisect_test/interactive_test.sh b/binary_search_tool/full_bisect_test/interactive_test.sh
new file mode 100755
index 0000000..064e4ae
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/interactive_test.sh
@@ -0,0 +1,56 @@
+#!/bin/bash -u
+#
+# This script is one of the required scripts that get passed to
+# binary_search_state.py.  Its job is to test the executable that
+# was generated by mixing/matching good & bad object files, and determine
+# whether the resulting binary is good or bad.
+#
+# In this particular case, the generated binary is 'bin-trees'.  This
+# script runs the binary, captures its output, and compares the output
+# to a file containing the correct (good) output, and to three files containing
+# what the bad output might look like, depending on whether one of the two
+# possible bad .o files was used, or both of them.
+#
+# If the output matches the known good output, this returns 0.
+# If the output matches any known bad output, this returns 1.
+# If the output does not match the good or bad outputs, this returns 125.
+#
+
+source full_bisect_test/common.sh
+
+full_bisect_test/bin-trees > full_bisect_test/temp_output.txt
+
+diff full_bisect_test/temp_output.txt full_bisect_test/good-output.txt &> /dev/null
+retval=$?
+
+if [[ ${retval} -eq 0 ]]; then
+  rm -f full_bisect_test/temp_output.txt
+  exit 0
+fi
+
+diff full_bisect_test/temp_output.txt full_bisect_test/bad-output-1.txt &> /dev/null
+retval=$?
+
+if [[ ${retval} -eq 0 ]]; then
+  rm -f full_bisect_test/temp_output.txt
+  exit 1
+else
+  diff full_bisect_test/temp_output.txt full_bisect_test/bad-output-2.txt &> /dev/null
+  retval=$?
+  if [[ ${retval} -eq 0 ]]; then
+    rm -f full_bisect_test/temp_output.txt
+    exit 1
+  else
+    diff full_bisect_test/temp_output.txt full_bisect_test/bad-output-3.txt &> /dev/null
+    retval=$?
+    if [[ ${retval} -eq 0 ]]; then
+      rm -f full_bisect_test/temp_output.txt
+      exit 1
+    fi
+  fi
+fi
+
+rm -f full_bisect_test/temp_output.txt
+exit 125
+
+
diff --git a/binary_search_tool/full_bisect_test/main-bisect-test.sh b/binary_search_tool/full_bisect_test/main-bisect-test.sh
new file mode 100755
index 0000000..af01c19
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/main-bisect-test.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+#  This script is the heart of the bisection test.  It assumes the good-objects
+#  and bad-objects directories have been created and populated.  It runs three
+#  bisection tests:
+#   Test 1.  use --file_args, and no pruning, which passes the object file list
+#            in a file, and stops as soon as it finds the first bad file.
+#   Test 2.  do not use --file_args, and no pruning.  The object files are passed
+#            directly on the command line; stop as soon as it finds the first
+#            bad file.
+#   Test 3.  use --file_args and --prune.  Pass the object file list in a file
+#            and run until it finds ALL the bad files (there are two of them).
+#
+
+SAVE_DIR=`pwd`
+
+DIR=full_bisect_test
+
+# Make sure you are running this script from the parent directory.
+if [[ ! -f "${DIR}/setup.sh" ]] ; then
+  echo "Cannot find ${DIR}/setup.sh.  You are running this from the wrong directory."
+  echo "You need to run this from toolchain-utils/binary_search_tool ."
+  exit 1
+fi
+
+# Run Test 1.
+${DIR}/setup.sh
+
+./binary_search_state.py --get_initial_items="${DIR}/get_initial_items.sh" \
+  --switch_to_good="${DIR}/switch_to_good.sh" \
+  --switch_to_bad="${DIR}/switch_to_bad.sh" \
+  --test_setup_script="${DIR}/test_setup.sh" \
+  --test_script="${DIR}/interactive_test.sh" \
+  --file_args &> /tmp/full_bisect_test.log
+
+${DIR}/cleanup.sh
+
+grep "Search complete. First bad version: " /tmp/full_bisect_test.log &> /dev/null
+test_status=$?
+
+if [[ ${test_status} -ne 0 ]] ; then
+  echo "Test 1 FAILED. See /tmp/full_bisect_test.log for details."
+  exit 1
+else
+  echo "Test 1 passed."
+fi
+
+cd ${SAVE_DIR}
+
+# Run Test 2.
+${DIR}/setup.sh
+
+./binary_search_state.py --get_initial_items="${DIR}/get_initial_items.sh" \
+  --switch_to_good="${DIR}/switch_to_good.sh" \
+  --switch_to_bad="${DIR}/switch_to_bad.sh" \
+  --test_setup_script="${DIR}/test_setup.sh" \
+  --test_script="${DIR}/interactive_test.sh" \
+  &> /tmp/full_bisect_test.log
+
+${DIR}/cleanup.sh
+
+grep "Search complete. First bad version: " /tmp/full_bisect_test.log &> /dev/null
+test_status=$?
+
+if [[ ${test_status} -ne 0 ]] ; then
+  echo "Test 2 FAILED. See /tmp/full_bisect_test.log for details."
+  exit 1
+else
+  echo "Test 2 passed."
+fi
+
+cd ${SAVE_DIR}
+
+# Run Test 3.
+${DIR}/setup.sh
+
+./binary_search_state.py --get_initial_items="${DIR}/get_initial_items.sh" \
+  --switch_to_good="${DIR}/switch_to_good.sh" \
+  --switch_to_bad="${DIR}/switch_to_bad.sh" \
+  --test_setup_script="${DIR}/test_setup.sh" \
+  --test_script="${DIR}/interactive_test.sh" \
+  --file_args --prune &> /tmp/full_bisect_test.log
+
+${DIR}/cleanup.sh
+
+grep "Bad items are: " /tmp/full_bisect_test.log | grep inorder_norecurse.o &> /dev/null
+test_status_1=$?
+
+grep "Bad items are: " /tmp/full_bisect_test.log | grep preorder_norecurse.o &> /dev/null
+test_status_2=$?
+
+if [[ ${test_status_1} -ne 0 ]] ; then
+  echo "Test 3 FAILED. See /tmp/full_bisect_test.log for details."
+  exit 1
+elif [[ ${test_status_2} -ne 0 ]] ; then
+  echo "Test 3 FAILED. See /tmp/full_bisect_test.log for details."
+  exit 1
+else
+  echo "Test 3 passed."
+fi
+
+# All tests passed!
+exit 0
+
diff --git a/binary_search_tool/full_bisect_test/main.c b/binary_search_tool/full_bisect_test/main.c
new file mode 100644
index 0000000..55abc44
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/main.c
@@ -0,0 +1,30 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include "bin-trees.h"
+
+int integers[] = {35, 28, 20, 30, 25, 23, 26, 60, 70, 65, 64, 68 };
+
+char pre_order[] = { '/', '-', '+', '*', 'a', '^', 'x', '2', '&', 'b', 'y',
+                     'c', '3' };
+char in_order[]  = { 'a', '*', 'x', '^', '2', '+', 'b', '&', 'y', '-', 'c',
+                     '/', '3' };
+
+int
+main (int argc, char ** argv)
+{
+  int intlist_size = 12;
+  int i;
+  tree_ptr root = NULL;
+  for (i = 0; i < intlist_size; ++i)
+    {
+      search_tree_insert (&root, integers[i]);
+    }
+  pre_order_traverse (root);
+  printf ("\n");
+  pre_order_traverse_no_recurse (root);
+  printf ("\n");
+  in_order_traverse (root);
+  printf ("\n");
+  in_order_traverse_no_recurse (root);
+  return 0;
+}
diff --git a/binary_search_tool/full_bisect_test/make_sources_bad.sh b/binary_search_tool/full_bisect_test/make_sources_bad.sh
new file mode 100755
index 0000000..507e8ca
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/make_sources_bad.sh
@@ -0,0 +1,15 @@
+#!/bin/bash -u
+#
+#  There are two versions (good & bad) of inorder_norecurse.c and
+#  preorder_norecurse.c.  This script makes sure the bad versions
+#  are copied into the .c files that will be built and copied into
+#  the bad-objects directory, for the bisection test. It is called
+#  from run-test-nowrapper.sh.
+#
+
+pushd full_bisect_test
+
+cp inorder_norecurse.c.bad inorder_norecurse.c
+cp preorder_norecurse.c.bad preorder_norecurse.c
+
+popd
diff --git a/binary_search_tool/full_bisect_test/make_sources_good.sh b/binary_search_tool/full_bisect_test/make_sources_good.sh
new file mode 100755
index 0000000..611e944
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/make_sources_good.sh
@@ -0,0 +1,15 @@
+#!/bin/bash -u
+#
+#  There are two versions (good & bad) of inorder_norecurse.c and
+#  preorder_norecurse.c.  This script makes sure the good versions
+#  are copied into the .c files that will be built and copied into
+#  the good-objects directory, for the bisection test.  It is called
+#  from run-test-nowrapper.sh.
+#
+
+pushd full_bisect_test
+
+cp inorder_norecurse.c.good inorder_norecurse.c
+cp preorder_norecurse.c.good preorder_norecurse.c
+
+popd
diff --git a/binary_search_tool/full_bisect_test/preorder.c b/binary_search_tool/full_bisect_test/preorder.c
new file mode 100644
index 0000000..11fe93a
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/preorder.c
@@ -0,0 +1,23 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "bin-trees.h"
+
+static void
+real_preorder (tree_ptr root)
+{
+  if (root == NULL)
+    return;
+
+  printf ("%d ", root->data);
+  real_preorder (root->left);
+  real_preorder (root->right);
+}
+
+
+void
+pre_order_traverse (tree_ptr root)
+{
+  printf ("pre-order traversal, with recursion: \n");
+  real_preorder (root) ;
+  printf ("\n");
+}
diff --git a/binary_search_tool/full_bisect_test/preorder_norecurse.c.bad b/binary_search_tool/full_bisect_test/preorder_norecurse.c.bad
new file mode 100644
index 0000000..a8b4b48
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/preorder_norecurse.c.bad
@@ -0,0 +1,29 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include "bin-trees.h"
+
+static void
+real_pre_order_traverse_no_recurse (tree_ptr root)
+{
+  struct stack_struct *stack = NULL;
+
+  if (root != NULL)
+    push (&stack, root);
+
+  while (stack != NULL)
+  {
+    tree_ptr current = pop (&stack);
+    printf ("%d ", current->data);
+    if (current->right != NULL)
+      push (&stack, current->right);
+  }
+  return;
+}
+
+void
+pre_order_traverse_no_recurse (tree_ptr root)
+{
+  printf ("pre-order traversal, without recursion: \n");
+  real_pre_order_traverse_no_recurse (root);
+  printf ("\n");
+}
diff --git a/binary_search_tool/full_bisect_test/preorder_norecurse.c.good b/binary_search_tool/full_bisect_test/preorder_norecurse.c.good
new file mode 100644
index 0000000..98f4091
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/preorder_norecurse.c.good
@@ -0,0 +1,31 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include "bin-trees.h"
+
+static void
+real_pre_order_traverse_no_recurse (tree_ptr root)
+{
+  struct stack_struct *stack = NULL;
+
+  if (root != NULL)
+    push (&stack, root);
+
+  while (stack != NULL)
+  {
+    tree_ptr current = pop (&stack);
+    printf ("%d ", current->data);
+    if (current->right != NULL)
+      push (&stack, current->right);
+    if (current->left != NULL)
+      push (&stack, current->left);
+  }
+  return;
+}
+
+void
+pre_order_traverse_no_recurse (tree_ptr root)
+{
+  printf ("pre-order traversal, without recursion: \n");
+  real_pre_order_traverse_no_recurse (root);
+  printf ("\n");
+}
diff --git a/binary_search_tool/full_bisect_test/run-test-nowrapper.sh b/binary_search_tool/full_bisect_test/run-test-nowrapper.sh
new file mode 100755
index 0000000..afc4a44
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/run-test-nowrapper.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+#
+# This script is one of the two main driver scripts for testing the bisector.
+# It should be used to test the bisection tool if you do NOT want to test
+# the compiler wrapper (i.e. it skips the POPULATE_GOOD & POPULATE_BAD
+# stages).
+#
+# It makes sure the good & bad object directories exist (soft links); checks
+# to see if it needs to compile the good & bad sources & populate the
+# directories; does so if needed.
+#
+# Then it calls main-bisect-test, which runs the actual bisection tests.  This
+# script assumes it is being run from the parent directory.
+#
+# NOTE: Your PYTHONPATH environment variable needs to include both the
+# toolchain-utils directory and the
+# toolchain-utils/binary_search_tool directory for these testers to work.
+#
+
+SAVE_DIR=`pwd`
+
+DIR=full_bisect_test
+
+if [[ ! -d "${DIR}" ]] ; then
+  echo "Cannot find ${DIR}; you are running this script from the wrong place."
+  echo "You need to run this from toolchain-utils/binary_search_tool ."
+  exit 1
+fi
+
+# Set up object file soft links
+cd ${DIR}
+
+rm -f good-objects
+rm -f bad-objects
+
+ln -s good-objects-permanent good-objects
+ln -s bad-objects-permanent bad-objects
+
+if [[ ! -d work ]] ; then
+  mkdir work
+fi
+
+# Check to see if the object files need to be built.
+if [[ ! -f good-objects-permanent/build.o ]] ; then
+  # 'make clean'
+  rm -f work/*.o
+  # skip populate stages in bisect wrapper
+  unset BISECT_STAGE
+  # Set up the 'good' source files.
+  cd ..
+  ${DIR}/make_sources_good.sh
+  cd ${DIR}
+  # Build the 'good' .o files & copy to appropriate directory.
+  ./build.sh
+  mv work/*.o good-objects-permanent/.
+  # Set up the 'bad' source files.
+  cd ..
+  ${DIR}/make_sources_bad.sh
+  cd ${DIR}
+  # Build the 'bad' .o files & copy to appropriate directory.
+  ./build.sh
+  mv work/*.o bad-objects-permanent/.
+fi
+
+# Now we're ready for the main test.
+
+cd ${SAVE_DIR}
+${DIR}/main-bisect-test.sh
diff --git a/binary_search_tool/full_bisect_test/setup.sh b/binary_search_tool/full_bisect_test/setup.sh
new file mode 100755
index 0000000..1214de9
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/setup.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# This script creates common.sh, which will be sourced by all the other
+# scripts, to set up the necessary environment variables for the bisection
+# to work properly.  It is called from main-bisect-test.sh.
+#
+
+DIR=`pwd`/"full_bisect_test"
+
+GOOD_BUILD=${DIR}/good-objects
+BAD_BUILD=${DIR}/bad-objects
+
+mkdir -p ${DIR}/work
+
+WORK_BUILD=${DIR}/work
+
+rm -f ${WORK_BUILD}/*
+
+COMMON_FILE="${DIR}/common.sh"
+
+cat <<-EOF > ${COMMON_FILE}
+
+BISECT_GOOD_BUILD=${GOOD_BUILD}
+BISECT_BAD_BUILD=${BAD_BUILD}
+BISECT_WORK_BUILD=${WORK_BUILD}
+
+BISECT_GOOD_SET=${GOOD_BUILD}/_LIST
+BISECT_BAD_SET=${BAD_BUILD}/_LIST
+
+BISECT_STAGE="TRIAGE"
+
+EOF
+
+chmod 755 ${COMMON_FILE}
+
+exit 0
diff --git a/binary_search_tool/full_bisect_test/stack.c b/binary_search_tool/full_bisect_test/stack.c
new file mode 100644
index 0000000..f8d0568
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/stack.c
@@ -0,0 +1,25 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include "bin-trees.h"
+
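+/* Pop the top tree node off the stack; returns NULL if the stack is empty.
+   (The stack element itself is not freed.) */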
+tree_ptr
+pop (struct stack_struct **stack)
+{
+  if (*stack == NULL)
+    return NULL;
+  else
+    {
+      tree_ptr value = (*stack)->data;
+      (*stack) = (*stack)->next;
+      return value;
+    }
+}
+
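+/* Push a tree node onto the stack by prepending a newly allocated element. */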
+void
+push (struct stack_struct **stack, tree_ptr value)
+{
+  struct stack_struct *new_node = (struct stack_struct *) malloc (sizeof (struct stack_struct));
+  new_node->data = value;
+  new_node->next = *stack;
+  *stack = new_node;
+}
diff --git a/binary_search_tool/full_bisect_test/switch_to_bad.sh b/binary_search_tool/full_bisect_test/switch_to_bad.sh
new file mode 100755
index 0000000..d88a4aa
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/switch_to_bad.sh
@@ -0,0 +1,54 @@
+#!/bin/bash -u
+#
+# This is one of the scripts that is passed to binary_search_state.py to do
+# the bisection.  This one takes a list of object files (either a real list or
+# a file containing the list) and copies the files from the bad objects
+# directory to the working directory.
+#
+
+source full_bisect_test/common.sh
+
+pushd ${BISECT_WORK_BUILD}
+chmod 644 *
+
+OBJ_LIST_FILES=$1
+FILE_ARGS=0
+
+if [[ -f ${OBJ_LIST_FILES} ]] ; then
+  file ${OBJ_LIST_FILES} &> ${BISECT_WORK_BUILD}/file_type.tmp
+  grep "ASCII text" ${BISECT_WORK_BUILD}/file_type.tmp
+  result=$?
+  if [[ ${result} -eq 0 ]] ; then
+    FILE_ARGS=1
+  fi
+  rm ${BISECT_WORK_BUILD}/file_type.tmp
+fi
+
+overall_status=0
+
+if [[ ${FILE_ARGS} -eq 1 ]] ; then
+  while read obj || [[ -n "${obj}" ]];
+  do
+    cp ${BISECT_BAD_BUILD}/${obj} ${BISECT_WORK_BUILD}
+    status=$?
+    if [[ ${status} -ne 0 ]] ; then
+      echo "Failed to copy ${obj} to work build tree."
+      overall_status=2
+    fi
+  done < ${OBJ_LIST_FILES}
+else
+
+  for o in "$@"
+  do
+    cp ${BISECT_BAD_BUILD}/${o} ${BISECT_WORK_BUILD}
+    status=$?
+    if [[ ${status} -ne 0 ]] ; then
+      echo "Failed to copy ${o} to work build tree."
+      overall_status=2
+    fi
+  done
+fi
+
+popd
+
+exit ${overall_status}
diff --git a/binary_search_tool/full_bisect_test/switch_to_good.sh b/binary_search_tool/full_bisect_test/switch_to_good.sh
new file mode 100755
index 0000000..9d8c29b
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/switch_to_good.sh
@@ -0,0 +1,57 @@
+#!/bin/bash -u
+#
+# This is one of the scripts that is passed to binary_search_state.py to do
+# the bisection.  This one takes a list of object files (either a real list or
+# a file containing the list) and copies the files from the good objects
+# directory to the working directory.
+#
+
+
+source full_bisect_test/common.sh
+
+pushd ${BISECT_WORK_BUILD}
+chmod 644 *
+
+OBJ_LIST_FILES=$1
+FILE_ARGS=0
+
+if [[ -f ${OBJ_LIST_FILES} ]] ; then
+  file ${OBJ_LIST_FILES} &> ${BISECT_WORK_BUILD}/file_type.tmp
+  grep "ASCII text" ${BISECT_WORK_BUILD}/file_type.tmp
+  result=$?
+  if [[ ${result} -eq 0 ]] ; then
+    FILE_ARGS=1
+  fi
+  rm ${BISECT_WORK_BUILD}/file_type.tmp
+fi
+
+overall_status=0
+
+if [[ ${FILE_ARGS} -eq 1 ]] ; then
+  while read obj || [[ -n "${obj}" ]];
+  do
+    echo "Copying ${BISECT_GOOD_BUILD}/${obj} to ${BISECT_WORK_BUILD}"
+    cp ${BISECT_GOOD_BUILD}/${obj} ${BISECT_WORK_BUILD}
+#    cp ${obj} ${BISECT_WORK_BUILD}/.
+    status=$?
+    if [[ ${status} -ne 0 ]] ; then
+      echo "Failed to copy ${obj} to work build tree."
+      overall_status=2
+    fi
+  done < ${OBJ_LIST_FILES}
+else
+
+  for o in "$@"
+  do
+    cp ${BISECT_GOOD_BUILD}/${o} ${BISECT_WORK_BUILD}
+    status=$?
+    if [[ ${status} -ne 0 ]] ; then
+      echo "Failed to copy ${o} to work build tree."
+      overall_status=2
+    fi
+  done
+fi
+
+popd
+
+exit ${overall_status}
diff --git a/binary_search_tool/full_bisect_test/test_setup.sh b/binary_search_tool/full_bisect_test/test_setup.sh
new file mode 100755
index 0000000..bb31383
--- /dev/null
+++ b/binary_search_tool/full_bisect_test/test_setup.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# This is one of the scripts that gets passed to binary_search_state.py.
+# It's supposed to generate the binary to be tested, from the mix of
+# good & bad object files.
+#
+source full_bisect_test/common.sh
+
+WD=`pwd`
+
+cd full_bisect_test
+
+echo "BUILDING IMAGE"
+
+gcc -o bin-trees work/*.o
+
diff --git a/binary_search_tool/run_bisect_test.py b/binary_search_tool/run_bisect_test.py
new file mode 100755
index 0000000..d4ff4f7
--- /dev/null
+++ b/binary_search_tool/run_bisect_test.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python2
+"""Run full bisection test."""
+
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+
+from cros_utils import command_executer
+
+TEST_DIR = 'full_bisect_test'
+DEFAULT_BISECT_DIR = '/tmp/sysroot_bisect'
+
+
+def populate_good_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR):
+  # 'make clean'
+  work_dir = os.path.join(top_dir, TEST_DIR, 'work')
+  cmd = 'rm -f %s/*.o' % work_dir
+  status = ce.RunCommand(cmd)
+  if status != 0:
+    print('Error trying to clean out work directory: %s' % cmd)
+    return status
+
+  # set up the 'good' source files
+  script = os.path.join(top_dir, TEST_DIR, 'make_sources_good.sh')
+  status = ce.RunCommand(script)
+  if status != 0:
+    print('Error setting up "good" source files: %s' % script)
+    return status
+
+  export_bisect = 'export BISECT_DIR=%s; ' % bisect_dir
+  # build the good source files
+  script_path = os.path.join(top_dir, TEST_DIR)
+  if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'):
+    build_script = 'chromeos_build.sh'
+  else:
+    build_script = 'build.sh'
+  cmd = ('%s export BISECT_STAGE=POPULATE_GOOD; pushd %s; ./%s; popd' %
+         (export_bisect, script_path, build_script))
+  status = ce.RunCommand(cmd)
+  return status
+
+
+def populate_bad_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR):
+  # 'make clean'
+  work_dir = os.path.join(top_dir, TEST_DIR, 'work')
+  cmd = 'rm -f %s/*.o' % work_dir
+  status = ce.RunCommand(cmd)
+  if status != 0:
+    print('Error trying to clean out work directory: %s' % cmd)
+    return status
+
+  # set up the 'bad' source files
+  script = os.path.join(top_dir, TEST_DIR, 'make_sources_bad.sh')
+  status = ce.RunCommand(script)
+  if status != 0:
+    print('Error setting up "bad" source files: %s' % script)
+    return status
+
+  export_bisect = 'export BISECT_DIR=%s; ' % bisect_dir
+  # build the bad source files
+  script_path = os.path.join(top_dir, TEST_DIR)
+  if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'):
+    build_script = 'chromeos_build.sh'
+  else:
+    build_script = 'build.sh'
+  cmd = ('%s export BISECT_STAGE=POPULATE_BAD; pushd %s; ./%s ; popd' %
+         (export_bisect, script_path, build_script))
+  status = ce.RunCommand(cmd)
+  return status
+
+
+def run_main_bisection_test(top_dir, ce):
+  test_script = os.path.join(top_dir, TEST_DIR, 'main-bisect-test.sh')
+  status = ce.RunCommand(test_script)
+  return status
+
+
+def verify_compiler_and_wrapper():
+  # We don't need to do any special setup if running inside a ChromeOS
+  # chroot.
+  if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'):
+    return
+
+  message = """
+*** IMPORTANT --- READ THIS CAREFULLY!! ***
+
+This test uses the command 'gcc' to compile the good/bad versions of the
+source program.  BEFORE you can run this script you must make sure that
+your compiler wrapper is in the right place, with the right name, so that
+a call to 'gcc' will go through your compiler wrapper and "do the right
+thing".
+
+Is your compiler wrapper properly set up? [Y/n]
+"""
+
+  print(message)
+  inp = sys.stdin.readline()
+  inp = inp.strip()
+  inp = inp.lower()
+  return not inp or inp == 'y' or inp == 'yes'
+
+
+def Main(argv):
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      '--dir',
+      dest='directory',
+      help='Bisection work tree, where good & bad object '
+      'files go.  Default is /tmp/sysroot_bisect')
+
+  options = parser.parse_args(argv)
+
+  # Make sure the compiler wrapper & soft links are properly set up.
+  wrapper_is_setup = verify_compiler_and_wrapper()
+  if not wrapper_is_setup:
+    print('Exiting now.  Please re-run after you have set up the compiler '
+          'wrapper.')
+    return 0
+
+  # Make sure we're in the correct directory for running this test.
+  cwd = os.getcwd()
+  if not os.path.exists(os.path.join(cwd, 'full_bisect_test')):
+    print('Error:  Wrong directory.  This script must be run from the top level'
+          ' of the binary_search_tool tree (under toolchain-utils).')
+    return 1
+
+  ce = command_executer.GetCommandExecuter()
+  bisect_dir = options.directory
+  if not bisect_dir:
+    bisect_dir = DEFAULT_BISECT_DIR
+
+  # Make sure BISECT_DIR is clean
+  if os.path.exists(bisect_dir):
+    cmd = 'rm -Rf %s/*' % bisect_dir
+    retv = ce.RunCommand(cmd)
+    if retv != 0:
+      return retv
+
+  retv = populate_good_files(cwd, ce, bisect_dir)
+  if retv != 0:
+    return retv
+
+  retv = populate_bad_files(cwd, ce, bisect_dir)
+  if retv != 0:
+    return retv
+
+  # Set up good/bad work soft links
+  cmd = ('rm -f %s/%s/good-objects; ln -s %s/good %s/%s/good-objects' %
+         (cwd, TEST_DIR, bisect_dir, cwd, TEST_DIR))
+
+  status = ce.RunCommand(cmd)
+  if status != 0:
+    print('Error executing: %s; exiting now.' % cmd)
+    return status
+
+  cmd = ('rm -f %s/%s/bad-objects; ln -s %s/bad %s/%s/bad-objects' %
+         (cwd, TEST_DIR, bisect_dir, cwd, TEST_DIR))
+
+  status = ce.RunCommand(cmd)
+  if status != 0:
+    print('Error executing: %s; exiting now.' % cmd)
+    return status
+
+  retv = run_main_bisection_test(cwd, ce)
+  return retv
+
+
+if __name__ == '__main__':
+  retval = Main(sys.argv[1:])
+  sys.exit(retval)
diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py
index 2f7bc4c..a0d6ca1 100755
--- a/binary_search_tool/sysroot_wrapper/testing_test.py
+++ b/binary_search_tool/sysroot_wrapper/testing_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Test for sysroot_wrapper bisector.
 
 All files in bad_files will be determined to be bad. This test was made for
@@ -15,9 +15,9 @@
 base_path = ('/var/cache/chromeos-chrome/chrome-src-internal/src/out_daisy/'
              'Release/obj/')
 bad_files = [
-    os.path.join(base_path, 'base/base.cpu.o'),
-    os.path.join(base_path, 'base/base.version.o'),
-    os.path.join(base_path, 'apps/apps.launcher.o')
+    os.path.join(base_path, 'base/base.cpu.o'), os.path.join(
+        base_path, 'base/base.version.o'), os.path.join(base_path,
+                                                        'apps/apps.launcher.o')
 ]
 
 bisect_dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
diff --git a/binary_search_tool/test/binary_search_tool_tester.py b/binary_search_tool/test/binary_search_tool_tester.py
index 775c171..e733d9c 100755
--- a/binary_search_tool/test/binary_search_tool_tester.py
+++ b/binary_search_tool/test/binary_search_tool_tester.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 
 # Copyright 2012 Google Inc. All Rights Reserved.
 """Tests for bisecting tool."""
@@ -66,12 +66,13 @@
       return 0
 
     def Run(self):
-      return binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                                     switch_to_good='./switch_to_good.py',
-                                     switch_to_bad='./switch_to_bad.py',
-                                     test_script='./is_good.py',
-                                     prune=True,
-                                     file_args=True)
+      return binary_search_state.Run(
+          get_initial_items='./gen_init_list.py',
+          switch_to_good='./switch_to_good.py',
+          switch_to_bad='./switch_to_bad.py',
+          test_script='./is_good.py',
+          prune=True,
+          file_args=True)
 
     def PostRun(self):
       CleanObj()
@@ -127,26 +128,31 @@
     except OSError:
       pass
 
-    cleanup_list = ['./is_setup', binary_search_state.STATE_FILE,
-                    'noinc_prune_bad', 'noinc_prune_good']
+    cleanup_list = [
+        './is_setup', binary_search_state.STATE_FILE, 'noinc_prune_bad',
+        'noinc_prune_good'
+    ]
     for f in cleanup_list:
       if os.path.exists(f):
         os.remove(f)
 
   def runTest(self):
-    ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                                  switch_to_good='./switch_to_good.py',
-                                  switch_to_bad='./switch_to_bad.py',
-                                  test_script='./is_good.py',
-                                  prune=True,
-                                  file_args=True)
+    ret = binary_search_state.Run(
+        get_initial_items='./gen_init_list.py',
+        switch_to_good='./switch_to_good.py',
+        switch_to_bad='./switch_to_bad.py',
+        test_script='./is_good.py',
+        prune=True,
+        file_args=True)
     self.assertEquals(ret, 0)
     self.check_output()
 
   def test_arg_parse(self):
-    args = ['--get_initial_items', './gen_init_list.py', '--switch_to_good',
-            './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py',
-            '--test_script', './is_good.py', '--prune', '--file_args']
+    args = [
+        '--get_initial_items', './gen_init_list.py', '--switch_to_good',
+        './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py',
+        '--test_script', './is_good.py', '--prune', '--file_args'
+    ]
     ret = binary_search_state.Main(args)
     self.assertEquals(ret, 0)
     self.check_output()
@@ -154,32 +160,35 @@
   def test_test_setup_script(self):
     os.remove('./is_setup')
     with self.assertRaises(AssertionError):
-      ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                                    switch_to_good='./switch_to_good.py',
-                                    switch_to_bad='./switch_to_bad.py',
-                                    test_script='./is_good.py',
-                                    prune=True,
-                                    file_args=True)
+      ret = binary_search_state.Run(
+          get_initial_items='./gen_init_list.py',
+          switch_to_good='./switch_to_good.py',
+          switch_to_bad='./switch_to_bad.py',
+          test_script='./is_good.py',
+          prune=True,
+          file_args=True)
 
-    ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                                  switch_to_good='./switch_to_good.py',
-                                  switch_to_bad='./switch_to_bad.py',
-                                  test_script='./is_good.py',
-                                  test_setup_script='./test_setup.py',
-                                  prune=True,
-                                  file_args=True)
+    ret = binary_search_state.Run(
+        get_initial_items='./gen_init_list.py',
+        switch_to_good='./switch_to_good.py',
+        switch_to_bad='./switch_to_bad.py',
+        test_script='./is_good.py',
+        test_setup_script='./test_setup.py',
+        prune=True,
+        file_args=True)
     self.assertEquals(ret, 0)
     self.check_output()
 
   def test_bad_test_setup_script(self):
     with self.assertRaises(AssertionError):
-      binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                              switch_to_good='./switch_to_good.py',
-                              switch_to_bad='./switch_to_bad.py',
-                              test_script='./is_good.py',
-                              test_setup_script='./test_setup_bad.py',
-                              prune=True,
-                              file_args=True)
+      binary_search_state.Run(
+          get_initial_items='./gen_init_list.py',
+          switch_to_good='./switch_to_good.py',
+          switch_to_bad='./switch_to_bad.py',
+          test_script='./is_good.py',
+          test_setup_script='./test_setup_bad.py',
+          prune=True,
+          file_args=True)
 
   def test_bad_save_state(self):
     state_file = binary_search_state.STATE_FILE
@@ -294,13 +303,14 @@
     self.assertEquals(bad_objs[found_obj], 1)
 
   def test_set_file(self):
-    binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                            switch_to_good='./switch_to_good_set_file.py',
-                            switch_to_bad='./switch_to_bad_set_file.py',
-                            test_script='./is_good.py',
-                            prune=True,
-                            file_args=True,
-                            verify=True)
+    binary_search_state.Run(
+        get_initial_items='./gen_init_list.py',
+        switch_to_good='./switch_to_good_set_file.py',
+        switch_to_bad='./switch_to_bad_set_file.py',
+        test_script='./is_good.py',
+        prune=True,
+        file_args=True,
+        verify=True)
     self.check_output()
 
   def test_noincremental_prune(self):
@@ -343,13 +353,14 @@
   def test_every_obj_bad(self):
     amt = 25
     gen_obj.Main(['--obj_num', str(amt), '--bad_obj_num', str(amt)])
-    ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                                  switch_to_good='./switch_to_good.py',
-                                  switch_to_bad='./switch_to_bad.py',
-                                  test_script='./is_good.py',
-                                  prune=True,
-                                  file_args=True,
-                                  verify=False)
+    ret = binary_search_state.Run(
+        get_initial_items='./gen_init_list.py',
+        switch_to_good='./switch_to_good.py',
+        switch_to_bad='./switch_to_bad.py',
+        test_script='./is_good.py',
+        prune=True,
+        file_args=True,
+        verify=False)
     self.assertEquals(ret, 0)
     self.check_output()
 
@@ -360,13 +371,14 @@
       obj_list[i] = '1'
       obj_list = ','.join(obj_list)
       gen_obj.Main(['--obj_list', obj_list])
-      ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
-                                    switch_to_good='./switch_to_good.py',
-                                    switch_to_bad='./switch_to_bad.py',
-                                    test_setup_script='./test_setup.py',
-                                    test_script='./is_good.py',
-                                    prune=True,
-                                    file_args=True)
+      ret = binary_search_state.Run(
+          get_initial_items='./gen_init_list.py',
+          switch_to_good='./switch_to_good.py',
+          switch_to_bad='./switch_to_bad.py',
+          test_setup_script='./test_setup.py',
+          test_script='./is_good.py',
+          prune=True,
+          file_args=True)
       self.assertEquals(ret, 0)
       self.check_output()
 
diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py
index baac943..5c3ff53 100755
--- a/binary_search_tool/test/common.py
+++ b/binary_search_tool/test/common.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Common utility functions."""
 
 DEFAULT_OBJECT_NUMBER = 1238
diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py
index 4a79a1b..002fc35 100755
--- a/binary_search_tool/test/gen_init_list.py
+++ b/binary_search_tool/test/gen_init_list.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Prints out index for every object file, starting from 0."""
 
 from __future__ import print_function
diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py
index 265729d..d17e93f 100755
--- a/binary_search_tool/test/gen_obj.py
+++ b/binary_search_tool/test/gen_obj.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Script to generate a list of object files.
 
 0 represents a good object file.
@@ -86,8 +86,8 @@
 
   obj_num = len(obj_list)
   bad_obj_num = obj_list.count('1')
-  print('Generated {0} object files, with {1} bad ones.'.format(obj_num,
-                                                                bad_obj_num))
+  print('Generated {0} object files, with {1} bad ones.'.format(
+      obj_num, bad_obj_num))
 
   return 0
 
diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py
index bfe9cc3..a0be4a0 100755
--- a/binary_search_tool/test/is_good.py
+++ b/binary_search_tool/test/is_good.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Check to see if the working set produces a good executable."""
 
 from __future__ import print_function
diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py
index 5aafd6c..a900bd3 100755
--- a/binary_search_tool/test/is_good_noinc_prune.py
+++ b/binary_search_tool/test/is_good_noinc_prune.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Check to see if the working set produces a good executable.
 
 This test script is made for the noincremental-prune test. This makes sure
diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py
index 165004e..51b7110 100755
--- a/binary_search_tool/test/switch_tmp.py
+++ b/binary_search_tool/test/switch_tmp.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Change portions of the object files to good.
 
 This file is a test switch script. Used only for the test test_tmp_cleanup.
diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py
index b860242..a1b6bd5 100755
--- a/binary_search_tool/test/switch_to_bad.py
+++ b/binary_search_tool/test/switch_to_bad.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Switch part of the objects file in working set to (possible) bad ones."""
 
 from __future__ import print_function
diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py
index 87bf158..db76aca 100755
--- a/binary_search_tool/test/switch_to_bad_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Switch part of the objects file in working set to (possible) bad ones.
 
 The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py
index f535fdf..edf226d 100755
--- a/binary_search_tool/test/switch_to_bad_set_file.py
+++ b/binary_search_tool/test/switch_to_bad_set_file.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Switch part of the objects file in working set to (possible) bad ones.
 
 This script is meant to be specifically used with the set_file test. This uses
diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py
index 68e9633..59a118c 100755
--- a/binary_search_tool/test/switch_to_good.py
+++ b/binary_search_tool/test/switch_to_good.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Change portions of the object files to good.
 
 The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py
index c5e78e4..00488a7 100755
--- a/binary_search_tool/test/switch_to_good_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_good_noinc_prune.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Change portions of the object files to good.
 
 The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py
index 83777af..b5e521f 100755
--- a/binary_search_tool/test/switch_to_good_set_file.py
+++ b/binary_search_tool/test/switch_to_good_set_file.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Change portions of the object files to good.
 
 The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py
index 3fb5a23..0d6a410 100755
--- a/binary_search_tool/test/test_setup.py
+++ b/binary_search_tool/test/test_setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Emulate running of test setup script, is_good.py should fail without this."""
 
 from __future__ import print_function
diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py
index 8d72763..d715f57 100755
--- a/binary_search_tool/test/test_setup_bad.py
+++ b/binary_search_tool/test/test_setup_bad.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Emulate test setup that fails (i.e. failed flash to device)"""
 
 from __future__ import print_function
diff --git a/build_chrome_browser.py b/build_chrome_browser.py
index 8effa19..c3b7887 100755
--- a/build_chrome_browser.py
+++ b/build_chrome_browser.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to checkout the ChromeOS source.
@@ -32,71 +32,81 @@
   cmd_executer = command_executer.GetCommandExecuter()
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('--chromeos_root',
-                      dest='chromeos_root',
-                      help='Target directory for ChromeOS installation.')
+  parser.add_argument(
+      '--chromeos_root',
+      dest='chromeos_root',
+      help='Target directory for ChromeOS installation.')
   parser.add_argument('--version', dest='version')
-  parser.add_argument('--clean',
-                      dest='clean',
-                      default=False,
-                      action='store_true',
-                      help=('Clean the /var/cache/chromeos-chrome/'
-                            'chrome-src/src/out_$board dir'))
-  parser.add_argument('--env',
-                      dest='env',
-                      default='',
-                      help='Use the following env')
-  parser.add_argument('--ebuild_version',
-                      dest='ebuild_version',
-                      help='Use this ebuild instead of the default one.')
-  parser.add_argument('--cflags',
-                      dest='cflags',
-                      default='',
-                      help='CFLAGS for the ChromeOS packages')
-  parser.add_argument('--cxxflags',
-                      dest='cxxflags',
-                      default='',
-                      help='CXXFLAGS for the ChromeOS packages')
-  parser.add_argument('--ldflags',
-                      dest='ldflags',
-                      default='',
-                      help='LDFLAGS for the ChromeOS packages')
-  parser.add_argument('--board',
-                      dest='board',
-                      help='ChromeOS target board, e.g. x86-generic')
-  parser.add_argument('--no_build_image',
-                      dest='no_build_image',
-                      default=False,
-                      action='store_true',
-                      help=('Skip build image after building browser.'
-                            'Defaults to False.'))
-  parser.add_argument('--label',
-                      dest='label',
-                      help='Optional label to apply to the ChromeOS image.')
-  parser.add_argument('--build_image_args',
-                      default='',
-                      dest='build_image_args',
-                      help='Optional arguments to build_image.')
-  parser.add_argument('--cros_workon',
-                      dest='cros_workon',
-                      help='Build using external source tree.')
-  parser.add_argument('--dev',
-                      dest='dev',
-                      default=False,
-                      action='store_true',
-                      help=('Build a dev (eg. writable/large) image. '
-                            'Defaults to False.'))
-  parser.add_argument('--debug',
-                      dest='debug',
-                      default=False,
-                      action='store_true',
-                      help=('Build chrome browser using debug mode. '
-                            'This option implies --dev. Defaults to false.'))
-  parser.add_argument('--verbose',
-                      dest='verbose',
-                      default=False,
-                      action='store_true',
-                      help='Build with verbose information.')
+  parser.add_argument(
+      '--clean',
+      dest='clean',
+      default=False,
+      action='store_true',
+      help=('Clean the /var/cache/chromeos-chrome/'
+            'chrome-src/src/out_$board dir'))
+  parser.add_argument(
+      '--env', dest='env', default='', help='Use the following env')
+  parser.add_argument(
+      '--ebuild_version',
+      dest='ebuild_version',
+      help='Use this ebuild instead of the default one.')
+  parser.add_argument(
+      '--cflags',
+      dest='cflags',
+      default='',
+      help='CFLAGS for the ChromeOS packages')
+  parser.add_argument(
+      '--cxxflags',
+      dest='cxxflags',
+      default='',
+      help='CXXFLAGS for the ChromeOS packages')
+  parser.add_argument(
+      '--ldflags',
+      dest='ldflags',
+      default='',
+      help='LDFLAGS for the ChromeOS packages')
+  parser.add_argument(
+      '--board', dest='board', help='ChromeOS target board, e.g. x86-generic')
+  parser.add_argument(
+      '--no_build_image',
+      dest='no_build_image',
+      default=False,
+      action='store_true',
+      help=('Skip build image after building browser.'
+            'Defaults to False.'))
+  parser.add_argument(
+      '--label',
+      dest='label',
+      help='Optional label to apply to the ChromeOS image.')
+  parser.add_argument(
+      '--build_image_args',
+      default='',
+      dest='build_image_args',
+      help='Optional arguments to build_image.')
+  parser.add_argument(
+      '--cros_workon',
+      dest='cros_workon',
+      help='Build using external source tree.')
+  parser.add_argument(
+      '--dev',
+      dest='dev',
+      default=False,
+      action='store_true',
+      help=('Build a dev (eg. writable/large) image. '
+            'Defaults to False.'))
+  parser.add_argument(
+      '--debug',
+      dest='debug',
+      default=False,
+      action='store_true',
+      help=('Build chrome browser using debug mode. '
+            'This option implies --dev. Defaults to false.'))
+  parser.add_argument(
+      '--verbose',
+      dest='verbose',
+      default=False,
+      action='store_true',
+      help='Build with verbose information.')
 
   options = parser.parse_args(argv)
 
@@ -130,8 +140,8 @@
     ebuild_version = 'chromeos-chrome'
 
   if options.cros_workon and not (
-      os.path.isdir(options.cros_workon) and os.path.exists(os.path.join(
-          options.cros_workon, 'src/chromeos/chromeos.gyp'))):
+      os.path.isdir(options.cros_workon) and os.path.exists(
+          os.path.join(options.cros_workon, 'src/chromeos/BUILD.gn'))):
     Usage(parser, '--cros_workon must be a valid chromium browser checkout.')
 
   if options.verbose:
@@ -179,9 +189,10 @@
   if options.cros_workon:
     cros_sdk_options = '--chrome_root={0}'.format(options.cros_workon)
 
-  ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
-                                      emerge_browser_command,
-                                      cros_sdk_options=cros_sdk_options)
+  ret = cmd_executer.ChrootRunCommand(
+      options.chromeos_root,
+      emerge_browser_command,
+      cros_sdk_options=cros_sdk_options)
 
   logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
 
@@ -197,13 +208,12 @@
     return ret
 
   # Finally build the image
-  ret = cmd_executer.ChrootRunCommand(
-      options.chromeos_root,
-      '{0} {1} {2} {3}'.format(unmask_env,
-                               options.env,
-                               misc.GetBuildImageCommand(options.board,
-                                                         dev=options.dev),
-                               options.build_image_args))
+  ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
+                                      '{0} {1} {2} {3}'.format(
+                                          unmask_env, options.env,
+                                          misc.GetBuildImageCommand(
+                                              options.board, dev=options.dev),
+                                          options.build_image_args))
 
   logger.GetLogger().LogFatalIf(ret, 'build_image failed')
 
@@ -226,8 +236,8 @@
         options.label)
 
     ret = cmd_executer.RunCommand(command)
-    logger.GetLogger().LogFatalIf(ret, 'Failed to apply symlink label %s' %
-                                  options.label)
+    logger.GetLogger().LogFatalIf(
+        ret, 'Failed to apply symlink label %s' % options.label)
 
   return ret
 
diff --git a/build_chromeos.py b/build_chromeos.py
index cb68fd0..0b0676d 100755
--- a/build_chromeos.py
+++ b/build_chromeos.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to checkout the ChromeOS source.
@@ -35,73 +35,81 @@
   cmd_executer = command_executer.GetCommandExecuter()
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('--chromeos_root',
-                      dest='chromeos_root',
-                      help='Target directory for ChromeOS installation.')
-  parser.add_argument('--clobber_chroot',
-                      dest='clobber_chroot',
-                      action='store_true',
-                      help='Delete the chroot and start fresh',
-                      default=False)
-  parser.add_argument('--clobber_board',
-                      dest='clobber_board',
-                      action='store_true',
-                      help='Delete the board and start fresh',
-                      default=False)
-  parser.add_argument('--rebuild',
-                      dest='rebuild',
-                      action='store_true',
-                      help='Rebuild all board packages except the toolchain.',
-                      default=False)
-  parser.add_argument('--cflags',
-                      dest='cflags',
-                      default='',
-                      help='CFLAGS for the ChromeOS packages')
-  parser.add_argument('--cxxflags',
-                      dest='cxxflags',
-                      default='',
-                      help='CXXFLAGS for the ChromeOS packages')
-  parser.add_argument('--ldflags',
-                      dest='ldflags',
-                      default='',
-                      help='LDFLAGS for the ChromeOS packages')
-  parser.add_argument('--board',
-                      dest='board',
-                      help='ChromeOS target board, e.g. x86-generic')
-  parser.add_argument('--package',
-                      dest='package',
-                      help='The package needs to be built')
-  parser.add_argument('--label',
-                      dest='label',
-                      help='Optional label symlink to point to build dir.')
-  parser.add_argument('--dev',
-                      dest='dev',
-                      default=False,
-                      action='store_true',
-                      help=('Make the final image in dev mode (eg writable, '
-                            'more space on image). Defaults to False.'))
-  parser.add_argument('--debug',
-                      dest='debug',
-                      default=False,
-                      action='store_true',
-                      help=("Optional. Build chrome browser with \"-g -O0\". "
-                            "Notice, this also turns on \'--dev\'. "
-                            'Defaults to False.'))
-  parser.add_argument('--env',
-                      dest='env',
-                      default='',
-                      help='Env to pass to build_packages.')
-  parser.add_argument('--vanilla',
-                      dest='vanilla',
-                      default=False,
-                      action='store_true',
-                      help='Use default ChromeOS toolchain.')
-  parser.add_argument('--vanilla_image',
-                      dest='vanilla_image',
-                      default=False,
-                      action='store_true',
-                      help=('Use prebuild packages for building the image. '
-                            'It also implies the --vanilla option is set.'))
+  parser.add_argument(
+      '--chromeos_root',
+      dest='chromeos_root',
+      help='Target directory for ChromeOS installation.')
+  parser.add_argument(
+      '--clobber_chroot',
+      dest='clobber_chroot',
+      action='store_true',
+      help='Delete the chroot and start fresh',
+      default=False)
+  parser.add_argument(
+      '--clobber_board',
+      dest='clobber_board',
+      action='store_true',
+      help='Delete the board and start fresh',
+      default=False)
+  parser.add_argument(
+      '--rebuild',
+      dest='rebuild',
+      action='store_true',
+      help='Rebuild all board packages except the toolchain.',
+      default=False)
+  parser.add_argument(
+      '--cflags',
+      dest='cflags',
+      default='',
+      help='CFLAGS for the ChromeOS packages')
+  parser.add_argument(
+      '--cxxflags',
+      dest='cxxflags',
+      default='',
+      help='CXXFLAGS for the ChromeOS packages')
+  parser.add_argument(
+      '--ldflags',
+      dest='ldflags',
+      default='',
+      help='LDFLAGS for the ChromeOS packages')
+  parser.add_argument(
+      '--board', dest='board', help='ChromeOS target board, e.g. x86-generic')
+  parser.add_argument(
+      '--package', dest='package', help='The package that needs to be built')
+  parser.add_argument(
+      '--label',
+      dest='label',
+      help='Optional label symlink to point to build dir.')
+  parser.add_argument(
+      '--dev',
+      dest='dev',
+      default=False,
+      action='store_true',
+      help=('Make the final image in dev mode (e.g. writable, '
+            'more space on image). Defaults to False.'))
+  parser.add_argument(
+      '--debug',
+      dest='debug',
+      default=False,
+      action='store_true',
+      help=("Optional. Build chrome browser with \"-g -O0\". "
+            "Notice, this also turns on \'--dev\'. "
+            'Defaults to False.'))
+  parser.add_argument(
+      '--env', dest='env', default='', help='Env to pass to build_packages.')
+  parser.add_argument(
+      '--vanilla',
+      dest='vanilla',
+      default=False,
+      action='store_true',
+      help='Use default ChromeOS toolchain.')
+  parser.add_argument(
+      '--vanilla_image',
+      dest='vanilla_image',
+      default=False,
+      action='store_true',
+      help=('Use prebuilt packages for building the image. '
+            'It also implies the --vanilla option is set.'))
 
   options = parser.parse_args(argv[1:])
 
@@ -134,12 +142,10 @@
   # Build with afdo_use by default.
   # To change the default use --env="USE=-afdo_use".
   build_packages_env = misc.MergeEnvStringWithDict(
-      build_packages_env, {'USE': 'chrome_internal afdo_use'})
+      build_packages_env, {'USE': 'chrome_internal afdo_use -cros-debug'})
 
   build_packages_command = misc.GetBuildPackagesCommand(
-      board=options.board,
-      usepkg=options.vanilla_image,
-      debug=options.debug)
+      board=options.board, usepkg=options.vanilla_image, debug=options.debug)
 
   if options.package:
     build_packages_command += ' {0}'.format(options.package)
@@ -147,9 +153,10 @@
   build_image_command = misc.GetBuildImageCommand(options.board, options.dev)
 
   if options.vanilla or options.vanilla_image:
-    command = misc.GetSetupBoardCommand(options.board,
-                                        usepkg=options.vanilla_image,
-                                        force=options.clobber_board)
+    command = misc.GetSetupBoardCommand(
+        options.board,
+        usepkg=options.vanilla_image,
+        force=options.clobber_board)
     command += '; ' + build_packages_env + ' ' + build_packages_command
     command += '&& ' + build_packages_env + ' ' + build_image_command
     ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command)
@@ -237,10 +244,10 @@
       "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
       'CHROME_ORIGIN=SERVER_SOURCE '
       '%s '
-      '%s --skip_chroot_upgrade' % (options.board, options.cflags,
-                                    options.board, options.cxxflags,
-                                    options.board, options.ldflags,
-                                    build_packages_env, build_packages_command))
+      '%s --skip_chroot_upgrade' %
+      (options.board, options.cflags, options.board, options.cxxflags,
+       options.board, options.ldflags, build_packages_env,
+       build_packages_command))
 
   logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
   if options.package:
@@ -269,8 +276,8 @@
                 os.path.dirname(real_image_dir_path), options.label))
 
     ret = cmd_executer.RunCommand(command)
-    logger.GetLogger().LogFatalIf(ret, 'Failed to apply symlink label %s' %
-                                  options.label)
+    logger.GetLogger().LogFatalIf(
+        ret, 'Failed to apply symlink label %s' % options.label)
 
   return ret
 
diff --git a/build_tc.py b/build_tc.py
index 55fc5b7..4f022d2 100755
--- a/build_tc.py
+++ b/build_tc.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
@@ -44,9 +44,9 @@
                                                       self._chromeos_root)
     self.tag = '%s-%s' % (name, self._ctarget)
     self._ce = command_executer.GetCommandExecuter()
-    self._mask_file = os.path.join(self._chromeos_root, 'chroot',
-                                   'etc/portage/package.mask/cross-%s' %
-                                   self._ctarget)
+    self._mask_file = os.path.join(
+        self._chromeos_root, 'chroot',
+        'etc/portage/package.mask/cross-%s' % self._ctarget)
     self._new_mask_file = None
 
     self._chroot_source_path = os.path.join(constants.MOUNTED_TOOLCHAIN_ROOT,
@@ -59,8 +59,8 @@
     cross_symlink = os.path.join(self._chromeos_root, 'chroot',
                                  'usr/local/bin/emerge-%s' % self._board)
     if not os.path.exists(cross_symlink):
-      command = ('%s/setup_board --board=%s' %
-                 (misc.CHROMEOS_SCRIPTS_DIR, self._board))
+      command = ('%s/setup_board --board=%s' % (misc.CHROMEOS_SCRIPTS_DIR,
+                                                self._board))
       self._ce.ChrootRunCommand(self._chromeos_root, command)
 
   def Build(self):
@@ -114,9 +114,9 @@
       mount_statuses = [mp.DoMount() == 0 for mp in mount_points]
 
       if not all(mount_statuses):
-        mounted = [mp
-                   for mp, status in zip(mount_points, mount_statuses)
-                   if status]
+        mounted = [
+            mp for mp, status in zip(mount_points, mount_statuses) if status
+        ]
         unmount_statuses = [mp.UnMount() == 0 for mp in mounted]
         assert all(unmount_statuses), 'Could not unmount all mount points!'
 
@@ -149,8 +149,8 @@
     if self._name == 'gcc' and not self._gcc_enable_ccache:
       env['USE'] += ' -wrapper_ccache'
 
-    env['%s_SOURCE_PATH' % self._name.upper()] = (
-        os.path.join('/', self._chroot_source_path))
+    env['%s_SOURCE_PATH' % self._name.upper()] = (os.path.join(
+        '/', self._chroot_source_path))
     env['ACCEPT_KEYWORDS'] = '~*'
     env_string = ' '.join(["%s=\"%s\"" % var for var in env.items()])
     command = 'emerge =cross-%s/%s-9999' % (self._ctarget, self._name)
@@ -159,8 +159,8 @@
     if rv != 0:
       return rv
     if self._name == 'gcc':
-      command = ('sudo cp -r /usr/lib/gcc/%s %s' %
-                 (self._ctarget, self._gcc_libs_dest))
+      command = ('sudo cp -r /usr/lib/gcc/%s %s' % (self._ctarget,
+                                                    self._gcc_libs_dest))
       rv = self._ce.ChrootRunCommand(self._chromeos_root, command)
     return rv
 
@@ -181,83 +181,99 @@
   """The main function."""
   # Common initializations
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      default='../../',
-                      help=('ChromeOS root checkout directory'
-                            ' uses ../.. if none given.'))
-  parser.add_argument('-g',
-                      '--gcc_dir',
-                      dest='gcc_dir',
-                      help='The directory where gcc resides.')
-  parser.add_argument('--binutils_dir',
-                      dest='binutils_dir',
-                      help='The directory where binutils resides.')
-  parser.add_argument('-x',
-                      '--gdb_dir',
-                      dest='gdb_dir',
-                      help='The directory where gdb resides.')
-  parser.add_argument('-b',
-                      '--board',
-                      dest='board',
-                      default='x86-alex',
-                      help='The target board.')
-  parser.add_argument('-n',
-                      '--noincremental',
-                      dest='noincremental',
-                      default=False,
-                      action='store_true',
-                      help='Use FEATURES=keepwork to do incremental builds.')
-  parser.add_argument('--cflags',
-                      dest='cflags',
-                      default='',
-                      help='Build a compiler with specified CFLAGS')
-  parser.add_argument('--cxxflags',
-                      dest='cxxflags',
-                      default='',
-                      help='Build a compiler with specified CXXFLAGS')
-  parser.add_argument('--cflags_for_target',
-                      dest='cflags_for_target',
-                      default='',
-                      help='Build the target libraries with specified flags')
-  parser.add_argument('--cxxflags_for_target',
-                      dest='cxxflags_for_target',
-                      default='',
-                      help='Build the target libraries with specified flags')
-  parser.add_argument('--ldflags',
-                      dest='ldflags',
-                      default='',
-                      help='Build a compiler with specified LDFLAGS')
-  parser.add_argument('-d',
-                      '--debug',
-                      dest='debug',
-                      default=False,
-                      action='store_true',
-                      help='Build a compiler with -g3 -O0 appended to both'
-                      ' CFLAGS and CXXFLAGS.')
-  parser.add_argument('-m',
-                      '--mount_only',
-                      dest='mount_only',
-                      default=False,
-                      action='store_true',
-                      help='Just mount the tool directories.')
-  parser.add_argument('-u',
-                      '--unmount_only',
-                      dest='unmount_only',
-                      default=False,
-                      action='store_true',
-                      help='Just unmount the tool directories.')
-  parser.add_argument('--extra_use_flags',
-                      dest='extra_use_flags',
-                      default='',
-                      help='Extra flag for USE, to be passed to the ebuild. '
-                      "('multislot' and 'mounted_<tool>' are always passed.)")
-  parser.add_argument('--gcc_enable_ccache',
-                      dest='gcc_enable_ccache',
-                      default=False,
-                      action='store_true',
-                      help='Enable ccache for the gcc invocations')
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      default='../../',
+      help=('ChromeOS root checkout directory;'
+            ' uses ../.. if none given.'))
+  parser.add_argument(
+      '-g',
+      '--gcc_dir',
+      dest='gcc_dir',
+      help='The directory where gcc resides.')
+  parser.add_argument(
+      '--binutils_dir',
+      dest='binutils_dir',
+      help='The directory where binutils resides.')
+  parser.add_argument(
+      '-x',
+      '--gdb_dir',
+      dest='gdb_dir',
+      help='The directory where gdb resides.')
+  parser.add_argument(
+      '-b',
+      '--board',
+      dest='board',
+      default='x86-alex',
+      help='The target board.')
+  parser.add_argument(
+      '-n',
+      '--noincremental',
+      dest='noincremental',
+      default=False,
+      action='store_true',
+      help='Use FEATURES=keepwork to do incremental builds.')
+  parser.add_argument(
+      '--cflags',
+      dest='cflags',
+      default='',
+      help='Build a compiler with specified CFLAGS')
+  parser.add_argument(
+      '--cxxflags',
+      dest='cxxflags',
+      default='',
+      help='Build a compiler with specified CXXFLAGS')
+  parser.add_argument(
+      '--cflags_for_target',
+      dest='cflags_for_target',
+      default='',
+      help='Build the target libraries with specified flags')
+  parser.add_argument(
+      '--cxxflags_for_target',
+      dest='cxxflags_for_target',
+      default='',
+      help='Build the target libraries with specified flags')
+  parser.add_argument(
+      '--ldflags',
+      dest='ldflags',
+      default='',
+      help='Build a compiler with specified LDFLAGS')
+  parser.add_argument(
+      '-d',
+      '--debug',
+      dest='debug',
+      default=False,
+      action='store_true',
+      help='Build a compiler with -g3 -O0 appended to both'
+      ' CFLAGS and CXXFLAGS.')
+  parser.add_argument(
+      '-m',
+      '--mount_only',
+      dest='mount_only',
+      default=False,
+      action='store_true',
+      help='Just mount the tool directories.')
+  parser.add_argument(
+      '-u',
+      '--unmount_only',
+      dest='unmount_only',
+      default=False,
+      action='store_true',
+      help='Just unmount the tool directories.')
+  parser.add_argument(
+      '--extra_use_flags',
+      dest='extra_use_flags',
+      default='',
+      help='Extra flag for USE, to be passed to the ebuild. '
+      "('multislot' and 'mounted_<tool>' are always passed.)")
+  parser.add_argument(
+      '--gcc_enable_ccache',
+      dest='gcc_enable_ccache',
+      default=False,
+      action='store_true',
+      help='Enable ccache for the gcc invocations')
 
   options = parser.parse_args(argv)
 
diff --git a/build_tool.py b/build_tool.py
index 1df4695..3bd357c 100755
--- a/build_tool.py
+++ b/build_tool.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Script to bootstrap the chroot using new toolchain.
 
 This script allows you to build/install a customized version of gcc/binutils,
@@ -34,7 +34,6 @@
 import re
 import sys
 
-
 from cros_utils import command_executer
 from cros_utils import logger
 from cros_utils import misc
@@ -92,14 +91,14 @@
   def SubmitToLocalBranch(self):
     """Copy source code to the chromium source tree and submit it locally."""
     if self._gcc_dir:
-      if not self.SubmitToolToLocalBranch(tool_name='gcc',
-                                          tool_dir=self._gcc_dir):
+      if not self.SubmitToolToLocalBranch(
+          tool_name='gcc', tool_dir=self._gcc_dir):
         return False
       self._gcc_branch = TEMP_BRANCH_NAME
 
     if self._binutils_dir:
-      if not self.SubmitToolToLocalBranch(tool_name='binutils',
-                                          tool_dir=self._binutils_dir):
+      if not self.SubmitToolToLocalBranch(
+          tool_name='binutils', tool_dir=self._binutils_dir):
         return False
       self._binutils_branch = TEMP_BRANCH_NAME
 
@@ -122,8 +121,8 @@
 
     # 0. Test to see if git tree is free of local changes.
     if not misc.IsGitTreeClean(chrome_tool_dir):
-      self._logger.LogError('Git repository "{0}" not clean, aborted.'.format(
-          chrome_tool_dir))
+      self._logger.LogError(
+          'Git repository "{0}" not clean, aborted.'.format(chrome_tool_dir))
       return False
 
     # 1. Checkout/create a (new) branch for testing.
@@ -135,9 +134,8 @@
       return False
 
     if self.IsTreeSame(tool_dir, chrome_tool_dir):
-      self._logger.LogOutput(
-          '"{0}" and "{1}" are the same, sync skipped.'.format(tool_dir,
-                                                               chrome_tool_dir))
+      self._logger.LogOutput('"{0}" and "{1}" are the same, sync skipped.'.
+                             format(tool_dir, chrome_tool_dir))
       return True
 
     # 2. Sync sources from user provided tool dir to chromiumos tool git.
@@ -163,17 +161,15 @@
     cmd = 'cd {0} && git log -1 --pretty=oneline'.format(tool_dir)
     tool_dir_extra_info = None
     ret, tool_dir_extra_info, _ = self._ce.RunCommandWOutput(
-        cmd,
-        print_to_console=False)
+        cmd, print_to_console=False)
     commit_message = 'Synced with tool source tree at - "{0}".'.format(tool_dir)
     if not ret:
       commit_message += '\nGit log for {0}:\n{1}'.format(
           tool_dir, tool_dir_extra_info.strip())
 
     if chrome_tool_repo.CommitLocally(commit_message):
-      self._logger.LogError(
-          'Commit to local branch "{0}" failed, aborted.'.format(
-              TEMP_BRANCH_NAME))
+      self._logger.LogError('Commit to local branch "{0}" failed, aborted.'.
+                            format(TEMP_BRANCH_NAME))
       return False
     return True
 
@@ -219,12 +215,12 @@
       command = ('cd "{0}" && git cat-file -p {1} '
                  '| grep -E "^tree [a-f0-9]+$" '
                  '| cut -d" " -f2').format(chrome_tool_dir, tool_branch)
-      ret, stdout, _ = self._ce.RunCommandWOutput(command,
-                                                  print_to_console=False)
+      ret, stdout, _ = self._ce.RunCommandWOutput(
+          command, print_to_console=False)
       # Pipe operation always has a zero return value. So need to check if
       # stdout is valid.
-      if not ret and stdout and re.match('[0-9a-h]{40}', stdout.strip(),
-                                         re.IGNORECASE):
+      if not ret and stdout and re.match('[0-9a-f]{40}',
+                                         stdout.strip(), re.IGNORECASE):
         tool_branch_tree = stdout.strip()
         self._logger.LogOutput('Find tree for {0} branch "{1}" - "{2}"'.format(
             tool_name, tool_branch, tool_branch_tree))
@@ -270,8 +266,8 @@
     """
 
     # To get the active gcc ebuild file, we need a workable chroot first.
-    if not os.path.exists(os.path.join(
-        self._chromeos_root, 'chroot')) and self._ce.RunCommand(
+    if not os.path.exists(
+        os.path.join(self._chromeos_root, 'chroot')) and self._ce.RunCommand(
             'cd "{0}" && cros_sdk --create'.format(self._chromeos_root)):
       self._logger.LogError(('Failed to install an initial chroot, aborted.\n'
                              'If previous bootstrap failed, do a '
@@ -284,12 +280,12 @@
         'equery w sys-devel/{0}'.format(tool_name),
         print_to_console=True)
     if rv:
-      self._logger.LogError(('Failed to execute inside chroot '
-                             '"equery w sys-devel/{0}", aborted.').format(
-                                 tool_name))
+      self._logger.LogError(
+          ('Failed to execute inside chroot '
+           '"equery w sys-devel/{0}", aborted.').format(tool_name))
       return (False, None, None)
-    m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(EBUILD_PATH_PATTERN.format(
-        tool_name)), stdout)
+    m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(
+        EBUILD_PATH_PATTERN.format(tool_name)), stdout)
     if not m:
       self._logger.LogError(
           ('Failed to find {0} ebuild file, aborted. '
@@ -324,7 +320,6 @@
       tooltree = self._binutils_branch_tree
       toolebuild = self._binutils_ebuild_file
 
-
     assert tool
 
     # An example for the following variables would be:
@@ -336,10 +331,8 @@
     if not toolgithash:
       return False
     toolcomponents = 'toolchain/{}'.format(tool)
-    return self.InplaceModifyToolEbuildFile(toolcomponents,
-                                            toolgithash,
-                                            tooltree,
-                                            toolebuild)
+    return self.InplaceModifyToolEbuildFile(toolcomponents, toolgithash,
+                                            tooltree, toolebuild)
 
   @staticmethod
   def ResetToolEbuildFile(chromeos_root, tool_name):
@@ -357,8 +350,8 @@
         path=('sys-devel/{0}/{0}-*.ebuild'.format(tool_name)),
         staged=False)
     if rv:
-      cmd = 'cd {0} && git checkout --'.format(os.path.join(
-          chromeos_root, CHROMIUMOS_OVERLAY_PATH))
+      cmd = 'cd {0} && git checkout --'.format(
+          os.path.join(chromeos_root, CHROMIUMOS_OVERLAY_PATH))
       for g in rv:
         cmd += ' ' + g
       rv = command_executer.GetCommandExecuter().RunCommand(cmd)
@@ -401,12 +394,8 @@
                              repo, print_to_console=True))
     return repo
 
-
-  def InplaceModifyToolEbuildFile(self,
-                                  tool_components,
-                                  tool_branch_githash,
-                                  tool_branch_tree,
-                                  tool_ebuild_file):
+  def InplaceModifyToolEbuildFile(self, tool_components, tool_branch_githash,
+                                  tool_branch_tree, tool_ebuild_file):
     """Using sed to fill properly values into the ebuild file.
 
     Args:
@@ -433,10 +422,8 @@
                ' # The following line is modified by script.\' '
                '-e \'s!^CROS_WORKON_TREE=".*"$!CROS_WORKON_TREE="{3}"!\' '
                '{4}').format('/home/{}/ndk-root'.format(os.environ['USER']),
-                             tool_components,
-                             tool_branch_githash,
-                             tool_branch_tree,
-                             tool_ebuild_file)
+                             tool_components, tool_branch_githash,
+                             tool_branch_tree, tool_ebuild_file)
     rv = self._ce.RunCommand(command)
     if rv:
       self._logger.LogError(
@@ -477,12 +464,11 @@
       True if operation succeeds.
     """
 
-    chroot_ndk_root = os.path.join(self._chromeos_root, 'chroot',
-                                   'home', os.environ['USER'],
-                                   'ndk-root')
+    chroot_ndk_root = os.path.join(self._chromeos_root, 'chroot', 'home',
+                                   os.environ['USER'], 'ndk-root')
     self._ce.RunCommand('mkdir -p {}'.format(chroot_ndk_root))
-    if self._ce.RunCommand('sudo mount --bind {} {}'.format(
-        self._ndk_dir, chroot_ndk_root)):
+    if self._ce.RunCommand(
+        'sudo mount --bind {} {}'.format(self._ndk_dir, chroot_ndk_root)):
       self._logger.LogError('Failed to mount ndk dir into chroot')
       return False
 
@@ -509,25 +495,24 @@
           target_built.add(target)
           command = 'sudo emerge cross-{0}/{1}'.format(target, tool_name)
 
-        rv = self._ce.ChrootRunCommand(self._chromeos_root,
-                                       command,
-                                       print_to_console=True)
+        rv = self._ce.ChrootRunCommand(
+            self._chromeos_root, command, print_to_console=True)
         if rv:
-          self._logger.LogError('Build {0} failed for {1}, aborted.'.format(
-              tool_name, board))
+          self._logger.LogError(
+              'Build {0} failed for {1}, aborted.'.format(tool_name, board))
           failed.append(board)
         else:
-          self._logger.LogOutput('Successfully built {0} for board {1}.'.format(
-              tool_name, board))
+          self._logger.LogOutput(
+              'Successfully built {0} for board {1}.'.format(tool_name, board))
     finally:
       # Make sure we un-mount ndk-root before we leave here, regardless of the
       # build result of the tool. Otherwise we may inadvertently delete ndk-root
       # dir, which is not part of the chroot and could be disastrous.
       if chroot_ndk_root:
         if self._ce.RunCommand('sudo umount {}'.format(chroot_ndk_root)):
-          self._logger.LogWarning(('Failed to umount "{}", please check '
-                                   'before deleting chroot.').format(
-                                       chroot_ndk_root))
+          self._logger.LogWarning(
+              ('Failed to umount "{}", please check '
+               'before deleting chroot.').format(chroot_ndk_root))
 
       # Clean up soft links created during build.
       self._ce.RunCommand('cd {}/toolchain/{} && git clean -df'.format(
@@ -556,8 +541,8 @@
         self._chromeos_root, logfile)
     rv = self._ce.RunCommand(command, print_to_console=True)
     if rv:
-      self._logger.LogError('Bootstrapping failed, log file - "{0}"\n'.format(
-          logfile))
+      self._logger.LogError(
+          'Bootstrapping failed, log file - "{0}"\n'.format(logfile))
       return False
 
     self._logger.LogOutput('Bootstrap succeeded.')
@@ -642,87 +627,99 @@
 
 def Main(argv):
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      help=('Optional. ChromeOs root dir. '
-                            'When not specified, chromeos root will be deduced'
-                            ' from current working directory.'))
-  parser.add_argument('--ndk_dir',
-                      dest='ndk_dir',
-                      help=('Topmost android ndk dir, required. '
-                            'Do not need to include the "toolchain/*" part.'))
-  parser.add_argument('--gcc_branch',
-                      dest='gcc_branch',
-                      help=('The branch to test against. '
-                            'This branch must be a local branch '
-                            'inside "src/third_party/gcc". '
-                            'Notice, this must not be used with "--gcc_dir".'))
-  parser.add_argument('--binutils_branch',
-                      dest='binutils_branch',
-                      help=('The branch to test against binutils. '
-                            'This branch must be a local branch '
-                            'inside "src/third_party/binutils". '
-                            'Notice, this must not be used with '
-                            '"--binutils_dir".'))
-  parser.add_argument('-g',
-                      '--gcc_dir',
-                      dest='gcc_dir',
-                      help=('Use a local gcc tree to do bootstrapping. '
-                            'Notice, this must not be used with '
-                            '"--gcc_branch".'))
-  parser.add_argument('--binutils_dir',
-                      dest='binutils_dir',
-                      help=('Use a local binutils tree to do bootstrapping. '
-                            'Notice, this must not be used with '
-                            '"--binutils_branch".'))
-  parser.add_argument('--fixperm',
-                      dest='fixperm',
-                      default=False,
-                      action='store_true',
-                      help=('Fix the (notorious) permission error '
-                            'while trying to bootstrap the chroot. '
-                            'Note this takes an extra 10-15 minutes '
-                            'and is only needed once per chromiumos tree.'))
-  parser.add_argument('--setup_tool_ebuild_file_only',
-                      dest='setup_tool_ebuild_file_only',
-                      default=False,
-                      action='store_true',
-                      help=('Setup gcc and/or binutils ebuild file '
-                            'to pick up the branch (--gcc/binutils_branch) or '
-                            'use gcc and/or binutils source '
-                            '(--gcc/binutils_dir) and exit. Keep chroot as is.'
-                            ' This should not be used with '
-                            '--gcc/binutils_dir/branch options.'))
-  parser.add_argument('--reset_tool_ebuild_file',
-                      dest='reset_tool_ebuild_file',
-                      default=False,
-                      action='store_true',
-                      help=('Reset the modification that is done by this '
-                            'script. Note, when this script is running, it '
-                            'will modify the active gcc/binutils ebuild file. '
-                            'Use this option to reset (what this script has '
-                            'done) and exit. This should not be used with -- '
-                            'gcc/binutils_dir/branch options.'))
-  parser.add_argument('--board',
-                      dest='board',
-                      default=None,
-                      help=('Only build toolchain for specific board(s). '
-                            'Use "host" to build for host. '
-                            'Use "," to seperate multiple boards. '
-                            'This does not perform a chroot bootstrap.'))
-  parser.add_argument('--bootstrap',
-                      dest='bootstrap',
-                      default=False,
-                      action='store_true',
-                      help=('Performs a chroot bootstrap. '
-                            'Note, this will *destroy* your current chroot.'))
-  parser.add_argument('--disable-2nd-bootstrap',
-                      dest='disable_2nd_bootstrap',
-                      default=False,
-                      action='store_true',
-                      help=('Disable a second bootstrap '
-                            '(build of amd64-host stage).'))
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      help=('Optional. ChromeOS root dir. '
+            'When not specified, the ChromeOS root will be deduced'
+            ' from the current working directory.'))
+  parser.add_argument(
+      '--ndk_dir',
+      dest='ndk_dir',
+      help=('Topmost Android NDK dir, required. '
+            'Do not need to include the "toolchain/*" part.'))
+  parser.add_argument(
+      '--gcc_branch',
+      dest='gcc_branch',
+      help=('The branch to test against. '
+            'This branch must be a local branch '
+            'inside "src/third_party/gcc". '
+            'Notice, this must not be used with "--gcc_dir".'))
+  parser.add_argument(
+      '--binutils_branch',
+      dest='binutils_branch',
+      help=('The branch to test against binutils. '
+            'This branch must be a local branch '
+            'inside "src/third_party/binutils". '
+            'Notice, this must not be used with '
+            '"--binutils_dir".'))
+  parser.add_argument(
+      '-g',
+      '--gcc_dir',
+      dest='gcc_dir',
+      help=('Use a local gcc tree to do bootstrapping. '
+            'Notice, this must not be used with '
+            '"--gcc_branch".'))
+  parser.add_argument(
+      '--binutils_dir',
+      dest='binutils_dir',
+      help=('Use a local binutils tree to do bootstrapping. '
+            'Notice, this must not be used with '
+            '"--binutils_branch".'))
+  parser.add_argument(
+      '--fixperm',
+      dest='fixperm',
+      default=False,
+      action='store_true',
+      help=('Fix the (notorious) permission error '
+            'while trying to bootstrap the chroot. '
+            'Note this takes an extra 10-15 minutes '
+            'and is only needed once per chromiumos tree.'))
+  parser.add_argument(
+      '--setup_tool_ebuild_file_only',
+      dest='setup_tool_ebuild_file_only',
+      default=False,
+      action='store_true',
+      help=('Set up gcc and/or binutils ebuild file '
+            'to pick up the branch (--gcc/binutils_branch) or '
+            'use gcc and/or binutils source '
+            '(--gcc/binutils_dir) and exit. Keep chroot as is.'
+            ' This should not be used with '
+            '--gcc/binutils_dir/branch options.'))
+  parser.add_argument(
+      '--reset_tool_ebuild_file',
+      dest='reset_tool_ebuild_file',
+      default=False,
+      action='store_true',
+      help=('Reset the modification that is done by this '
+            'script. Note, when this script is running, it '
+            'will modify the active gcc/binutils ebuild file. '
+            'Use this option to reset (what this script has '
+            'done) and exit. This should not be used with '
+            '--gcc/binutils_dir/branch options.'))
+  parser.add_argument(
+      '--board',
+      dest='board',
+      default=None,
+      help=('Only build toolchain for specific board(s). '
+            'Use "host" to build for host. '
+            'Use "," to seperate multiple boards. '
+            'This does not perform a chroot bootstrap.'))
+  parser.add_argument(
+      '--bootstrap',
+      dest='bootstrap',
+      default=False,
+      action='store_true',
+      help=('Performs a chroot bootstrap. '
+            'Note, this will *destroy* your current chroot.'))
+  parser.add_argument(
+      '--disable-2nd-bootstrap',
+      dest='disable_2nd_bootstrap',
+      default=False,
+      action='store_true',
+      help=('Disable a second bootstrap '
+            '(build of amd64-host stage).'))
 
   options = parser.parse_args(argv)
   # Trying to deduce chromeos root from current directory.
@@ -740,12 +737,12 @@
     parser.error('Missing or failing to deduce mandatory option '
                  '"--chromeos_root".')
     return 1
 
-  options.chromeos_root = os.path.abspath(os.path.expanduser(
-      options.chromeos_root))
+  options.chromeos_root = os.path.abspath(
+      os.path.expanduser(options.chromeos_root))
 
   if not os.path.isdir(options.chromeos_root):
-    logger.GetLogger().LogError('"{0}" does not exist.'.format(
-        options.chromeos_root))
+    logger.GetLogger().LogError(
+        '"{0}" does not exist.'.format(options.chromeos_root))
     return 1
 
   options.ndk_dir = os.path.expanduser(options.ndk_dir)
@@ -755,8 +752,8 @@
 
   # Some tolerance regarding user input. We only need the ndk_root part, do not
   # include toolchain/(gcc|binutils)/ part in this option.
-  options.ndk_dir = re.sub(
-      '/toolchain(/gcc|/binutils)?/?$', '', options.ndk_dir)
+  options.ndk_dir = re.sub('/toolchain(/gcc|/binutils)?/?$', '',
+                           options.ndk_dir)
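For illustration, a quick sketch of what this normalization accepts; the sample paths are invented, but every variant reduces to the same ndk root:

import re

# Strips a trailing toolchain, toolchain/gcc or toolchain/binutils component,
# with or without a final slash, so users can pass any of these forms.
for p in ['/src/ndk', '/src/ndk/toolchain', '/src/ndk/toolchain/gcc/',
          '/src/ndk/toolchain/binutils']:
  print(re.sub('/toolchain(/gcc|/binutils)?/?$', '', p))  # always '/src/ndk'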
 
   if not (os.path.isdir(options.ndk_dir) and
           os.path.isdir(os.path.join(options.ndk_dir, 'toolchain'))):
@@ -766,11 +763,11 @@
 
   if options.fixperm:
     # Fix perm error before continuing.
-    cmd = (
-        r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
-        r'\( -name "chroot" -type d -prune \) -o '
-        r'\( -type f -exec chmod a+r {{}} \; \) -o '
-        r'\( -type d -exec chmod a+rx {{}} \; \)').format(options.chromeos_root)
+    cmd = (r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
+           r'\( -name "chroot" -type d -prune \) -o '
+           r'\( -type f -exec chmod a+r {{}} \; \) -o '
+           r'\( -type d -exec chmod a+rx {{}} \; \)'
+          ).format(options.chromeos_root)
     logger.GetLogger().LogOutput(
         'Fixing perm issues for chromeos root, this might take some time.')
     command_executer.GetCommandExecuter().RunCommand(cmd)
@@ -792,8 +789,8 @@
   if options.gcc_dir:
     options.gcc_dir = os.path.abspath(os.path.expanduser(options.gcc_dir))
     if not os.path.isdir(options.gcc_dir):
-      logger.GetLogger().LogError('"{0}" does not exist.'.format(
-          options.gcc_dir))
+      logger.GetLogger().LogError(
+          '"{0}" does not exist.'.format(options.gcc_dir))
       return 1
 
   if options.gcc_branch and options.gcc_dir:
@@ -801,11 +798,11 @@
     return 1
 
   if options.binutils_dir:
-    options.binutils_dir = os.path.abspath(os.path.expanduser(
-        options.binutils_dir))
+    options.binutils_dir = os.path.abspath(
+        os.path.expanduser(options.binutils_dir))
     if not os.path.isdir(options.binutils_dir):
-      logger.GetLogger().LogError('"{0}" does not exist.'.format(
-          options.binutils_dir))
+      logger.GetLogger().LogError(
+          '"{0}" does not exist.'.format(options.binutils_dir))
       return 1
 
   if options.binutils_branch and options.binutils_dir:
@@ -813,8 +810,8 @@
                  '"--binutils_branch" can be specified.')
     return 1
 
-  if (not (options.binutils_branch or options.binutils_dir or options.gcc_branch
-           or options.gcc_dir)):
+  if (not (options.binutils_branch or options.binutils_dir or
+           options.gcc_branch or options.gcc_dir)):
     parser.error(('At least one of "--gcc_dir", "--gcc_branch", '
                   '"--binutils_dir" and "--binutils_branch" must '
                   'be specified.'))
diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py
index a43db75..5d68689 100755
--- a/buildbot_test_llvm.py
+++ b/buildbot_test_llvm.py
@@ -23,29 +23,29 @@
 
 from cros_utils import buildbot_utils
 
-# CL that uses LLVM to build the peppy image.
-USE_LLVM_PATCH = '295217'
-
 CROSTC_ROOT = '/usr/local/google/crostc'
 ROLE_ACCOUNT = 'mobiletc-prebuild'
 TOOLCHAIN_DIR = os.path.dirname(os.path.realpath(__file__))
 MAIL_PROGRAM = '~/var/bin/mail-sheriff'
 VALIDATION_RESULT_DIR = os.path.join(CROSTC_ROOT, 'validation_result')
 START_DATE = datetime.date(2016, 1, 1)
-TEST_PER_DAY = 2
+TEST_PER_DAY = 3
 TEST_BOARD = [
-    'squawks',      # x86_64, rambi  (baytrail)
-    'terra',        # x86_64, strago (braswell)
-    'lulu',         # x86_64, auron  (broadwell)
-    'peach_pit',    # arm,    peach  (exynos-5420)
-    'peppy',        # x86_64, slippy (haswell celeron)
-    'link',         # x86_64, ivybridge (ivybridge)
-    'nyan_big',     # arm,    nyan   (tegra)
-    'sentry',       # x86_64, kunimitsu (skylake-u)
-    'chell',        # x86_64, glados (skylake-y)
-    'daisy',        # arm,    daisy  (exynos)
-    'caroline',     # amd64
-    'kevin',        # arm,    gru  (Rockchip)
+    'squawks',  # x86_64, rambi  (baytrail)
+    'terra',  # x86_64, strago (braswell)
+    'lulu',  # x86_64, auron  (broadwell)
+    'peach_pit',  # arm,    peach  (exynos-5420)
+    'peppy',  # x86_64, slippy (haswell celeron)
+    'link',  # x86_64, ivybridge (ivybridge)
+    'nyan_big',  # arm,    nyan   (tegra)
+    'sentry',  # x86_64, kunimitsu (skylake-u)
+    'chell',  # x86_64, glados (skylake-y)
+    'daisy',  # arm,    daisy  (exynos)
+    'caroline',  # x86_64, glados (skylake-y)
+    'kevin',  # arm,    gru  (Rockchip)
+    'reef',  # x86_64, reef  (Apollo Lake)
+    'lakitu',
+    'whirlwind',
 ]
 
 
@@ -99,6 +99,7 @@
 
     return 0
 
+
 def Main(argv):
   """The main function."""
 
@@ -136,13 +137,10 @@
   if not options.compiler:
     print('Please specify which compiler to test (gcc, llvm, or llvm-next).')
     return 1
-  patches = options.patches
-  if not patches and options.compiler == 'llvm':
-    patches = USE_LLVM_PATCH
 
   if options.board:
     fv = ToolchainVerifier(options.board, options.chromeos_root,
-                           options.weekday, patches, options.compiler)
+                           options.weekday, options.patches, options.compiler)
     return fv.Doall()
 
   today = datetime.date.today()
@@ -154,7 +152,7 @@
     try:
       board = TEST_BOARD[(start_board + i) % len(TEST_BOARD)]
       fv = ToolchainVerifier(board, options.chromeos_root, options.weekday,
-                             patches, options.compiler)
+                             options.patches, options.compiler)
       fv.DoAll()
     except SystemExit:
       logfile = os.path.join(VALIDATION_RESULT_DIR, options.compiler, board)
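The loop above rotates through TEST_BOARD day by day. A minimal sketch of that rotation, assuming start_board is derived from the days elapsed since START_DATE multiplied by TEST_PER_DAY (that derivation is not shown in this hunk):

import datetime

START_DATE = datetime.date(2016, 1, 1)
TEST_PER_DAY = 3
TEST_BOARD = ['squawks', 'terra', 'lulu', 'peach_pit', 'peppy', 'link',
              'nyan_big', 'sentry', 'chell', 'daisy', 'caroline', 'kevin',
              'reef', 'lakitu', 'whirlwind']

def boards_for(today):
  # Consecutive days pick consecutive TEST_PER_DAY-sized slices of the list,
  # wrapping around so every board is eventually covered.
  start_board = (today - START_DATE).days * TEST_PER_DAY
  return [TEST_BOARD[(start_board + i) % len(TEST_BOARD)]
          for i in range(TEST_PER_DAY)]

print(boards_for(datetime.date.today()))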
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index 5bef28a..06707be 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -28,14 +28,8 @@
 
 from cros_utils import buildbot_utils
 
-# CL that updated GCC ebuilds to use 'next_gcc'.
-USE_NEXT_GCC_PATCH = '230260'
-
-# CL that uses LLVM to build the peppy image.
-USE_LLVM_PATCH = '295217'
-
 # CL that uses LLVM-Next to build the images (includes chrome).
-USE_LLVM_NEXT_PATCH = '424123'
+USE_LLVM_NEXT_PATCH = '513590'
 
 CROSTC_ROOT = '/usr/local/google/crostc'
 ROLE_ACCOUNT = 'mobiletc-prebuild'
@@ -78,7 +72,7 @@
     self._ce = command_executer.GetCommandExecuter()
     self._l = logger.GetLogger()
     self._build = '%s-release' % board
-    self._patches = patches.split(',')
+    self._patches = patches.split(',') if patches else []
     self._patches_string = '_'.join(str(p) for p in self._patches)
     self._noschedv2 = noschedv2
 
@@ -105,9 +99,7 @@
     mo = re.search(TRYBOT_IMAGE_RE, trybot_image)
     assert mo
     dirname = IMAGE_DIR.replace('\\', '').format(**mo.groupdict())
-    version = buildbot_utils.GetGSContent(self._chromeos_root,
-                                          dirname + '/LATEST-master')
-    return dirname + '/' + version
+    return buildbot_utils.GetLatestImage(self._chromeos_root, dirname)
 
   def _GetNonAFDOImageName(self, trybot_image):
     """Given a trybot artifact name, get corresponding non-AFDO image name.
@@ -158,13 +150,10 @@
     experiment_file_dir = os.path.join(self._chromeos_root, '..', self._weekday)
     experiment_file_name = '%s_toolchain_experiment.txt' % self._board
 
-    compiler_string = 'gcc'
+    compiler_string = 'llvm'
     if USE_LLVM_NEXT_PATCH in self._patches_string:
       experiment_file_name = '%s_llvm_next_experiment.txt' % self._board
       compiler_string = 'llvm_next'
-    elif USE_LLVM_PATCH in self._patches_string:
-      experiment_file_name = '%s_llvm_experiment.txt' % self._board
-      compiler_string = 'llvm'
 
     experiment_file = os.path.join(experiment_file_dir, experiment_file_name)
     experiment_header = """
@@ -175,12 +164,12 @@
     experiment_tests = """
     benchmark: all_toolchain_perf {
       suite: telemetry_Crosperf
-      iterations: 3
+      iterations: 0
     }
 
     benchmark: page_cycler_v2.typical_25 {
       suite: telemetry_Crosperf
-      iterations: 2
+      iterations: 0
       run_local: False
       retries: 0
     }
@@ -195,7 +184,7 @@
           vanilla_image {
             chromeos_root: %s
             build: %s
-            compiler: gcc
+            compiler: llvm
           }
           """ % (self._chromeos_root, vanilla_image)
       f.write(official_image)
@@ -206,14 +195,12 @@
           nonafdo_image {
             chromeos_root: %s
             build: %s
-            compiler: gcc
+            compiler: llvm
           }
           """ % (self._chromeos_root, nonafdo_image)
         f.write(official_nonafdo_image)
 
       label_string = '%s_trybot_image' % compiler_string
-      if USE_NEXT_GCC_PATCH in self._patches:
-        label_string = 'gcc_next_trybot_image'
 
       # Reuse autotest files from vanilla image for trybot images
       autotest_files = os.path.join('/tmp', vanilla_image, 'autotest_files')
@@ -251,11 +238,9 @@
     filename = os.path.join(self._reports_dir, 'msg_body.html')
     if (os.path.exists(filename) and
         os.path.exists(os.path.expanduser(MAIL_PROGRAM))):
-      email_title = 'buildbot test results'
+      email_title = 'buildbot llvm test results'
       if USE_LLVM_NEXT_PATCH in self._patches_string:
         email_title = 'buildbot llvm_next test results'
-      elif USE_LLVM_PATCH in self._patches_string:
-        email_title = 'buildbot llvm test results'
       command = ('cat %s | %s -s "%s, %s" -team -html' %
                  (filename, MAIL_PROGRAM, email_title, self._board))
       self._ce.RunCommand(command)
@@ -341,13 +326,9 @@
   if not options.chromeos_root:
     print('Please specify the ChromeOS root directory.')
     return 1
-  if options.patches:
-    patches = options.patches
-  else:
-    patches = USE_NEXT_GCC_PATCH
 
   fc = ToolchainComparator(options.board, options.remote, options.chromeos_root,
-                           options.weekday, patches, options.noschedv2)
+                           options.weekday, options.patches, options.noschedv2)
   return fc.DoAll()
 
 
diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py
index 68791ac..82e4e17 100755
--- a/chromiumos_image_diff.py
+++ b/chromiumos_image_diff.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Diff 2 chromiumos images by comparing each elf file.
 
    The script diffs every *ELF* file by disassembling every *executable*
@@ -53,8 +53,8 @@
       self.stateful = '/tmp/{0}.stateful'.format(mount_basename)
       self.unmount_script = '/tmp/{0}.unmount.sh'.format(mount_basename)
     else:
-      self.rootfs = tempfile.mkdtemp(suffix='.rootfs',
-                                     prefix='chromiumos_image_diff')
+      self.rootfs = tempfile.mkdtemp(
+          suffix='.rootfs', prefix='chromiumos_image_diff')
       ## rootfs is like /tmp/tmpxyz012.rootfs.
       match = re.match(r'^(.*)\.rootfs$', self.rootfs)
       basename = match.group(1)
@@ -78,14 +78,15 @@
   def CreateUnmountScript(self):
     command = ('sudo umount {r}/usr/local {r}/usr/share/oem '
                '{r}/var {r}/mnt/stateful_partition {r}; sudo umount {s} ; '
-               'rmdir {r} ; rmdir {s}\n').format(r=self.rootfs, s=self.stateful)
+               'rmdir {r} ; rmdir {s}\n').format(
+                   r=self.rootfs, s=self.stateful)
     f = open(self.unmount_script, 'w')
     f.write(command)
     f.close()
-    self._ce.RunCommand('chmod +x {}'.format(self.unmount_script),
-                        print_to_console=False)
-    self.logger.LogOutput('Created an unmount script - "{0}"'.format(
-        self.unmount_script))
+    self._ce.RunCommand(
+        'chmod +x {}'.format(self.unmount_script), print_to_console=False)
+    self.logger.LogOutput(
+        'Created an unmount script - "{0}"'.format(self.unmount_script))
 
   def UnmountImage(self):
     """Unmount the image and delete mount point."""
@@ -114,11 +115,12 @@
       Always true
     """
 
-    self.logger.LogOutput('Finding all elf files in "{0}" ...'.format(
-        self.rootfs))
+    self.logger.LogOutput(
+        'Finding all elf files in "{0}" ...'.format(self.rootfs))
     # Note '\;' must be prefixed by 'r'.
     command = ('find "{0}" -type f -exec '
-               'bash -c \'file -b "{{}}" | grep -q "ELF"\'' r' \; '
+               'bash -c \'file -b "{{}}" | grep -q "ELF"\''
+               r' \; '
                r'-exec echo "{{}}" \;').format(self.rootfs)
     self.logger.LogCmd(command)
     _, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False)
@@ -142,8 +144,8 @@
     if self.tempf1 and self.tempf2:
       command_executer.GetCommandExecuter().RunCommand(
           'rm {0} {1}'.format(self.tempf1, self.tempf2))
-      logger.GetLogger('Removed "{0}" and "{1}".'.format(
-          self.tempf1, self.tempf2))
+      logger.GetLogger(
+          'Removed "{0}" and "{1}".'.format(self.tempf1, self.tempf2))
 
   def CheckElfFileSetEquality(self):
     """Checking whether images have exactly number of elf files."""
@@ -183,8 +185,8 @@
     match_count = 0
     i1 = self.images[0]
     i2 = self.images[1]
-    self.logger.LogOutput('Start comparing {0} elf file by file ...'.format(
-        len(i1.elf_files)))
+    self.logger.LogOutput(
+        'Start comparing {0} elf files, file by file ...'.format(
+            len(i1.elf_files)))
     ## Note - i1.elf_files and i2.elf_files have exactly the same entries here.
 
     ## Create 2 temp files to be used for all disassembled files.
@@ -205,35 +207,41 @@
             'Error:  We\'re comparing the SAME file - {0}'.format(f1))
         continue
 
-      command = ('objdump -d "{f1}" > {tempf1} ; '
-                 'objdump -d "{f2}" > {tempf2} ; '
-                 # Remove path string inside the dissemble
-                 'sed -i \'s!{rootfs1}!!g\' {tempf1} ; '
-                 'sed -i \'s!{rootfs2}!!g\' {tempf2} ; '
-                 'diff {tempf1} {tempf2} 1>/dev/null 2>&1').format(
-                     f1=full_path1, f2=full_path2,
-                     rootfs1=i1.rootfs, rootfs2=i2.rootfs,
-                     tempf1=self.tempf1, tempf2=self.tempf2)
+      command = (
+          'objdump -d "{f1}" > {tempf1} ; '
+          'objdump -d "{f2}" > {tempf2} ; '
+          # Remove path strings inside the disassembly
+          'sed -i \'s!{rootfs1}!!g\' {tempf1} ; '
+          'sed -i \'s!{rootfs2}!!g\' {tempf2} ; '
+          'diff {tempf1} {tempf2} 1>/dev/null 2>&1').format(
+              f1=full_path1,
+              f2=full_path2,
+              rootfs1=i1.rootfs,
+              rootfs2=i2.rootfs,
+              tempf1=self.tempf1,
+              tempf2=self.tempf2)
       ret = cmde.RunCommand(command, print_to_console=False)
       if ret != 0:
-        self.logger.LogOutput('*** Not match - "{0}" "{1}"'.format(
-            full_path1, full_path2))
+        self.logger.LogOutput(
+            '*** Mismatch - "{0}" "{1}"'.format(full_path1, full_path2))
         mismatch_list.append(f1)
         if self.diff_file:
-          command = (
-              'echo "Diffs of disassemble of \"{f1}\" and \"{f2}\"" '
-              '>> {diff_file} ; diff {tempf1} {tempf2} '
-              '>> {diff_file}').format(
-                  f1=full_path1, f2=full_path2, diff_file=self.diff_file,
-                  tempf1=self.tempf1, tempf2=self.tempf2)
+          command = ('echo "Diffs of disassemble of \"{f1}\" and \"{f2}\"" '
+                     '>> {diff_file} ; diff {tempf1} {tempf2} '
+                     '>> {diff_file}').format(
+                         f1=full_path1,
+                         f2=full_path2,
+                         diff_file=self.diff_file,
+                         tempf1=self.tempf1,
+                         tempf2=self.tempf2)
           cmde.RunCommand(command, print_to_console=False)
       else:
         match_count += 1
     ## End of comparing every elf files.
 
     if not mismatch_list:
-      self.logger.LogOutput('** COOL, ALL {0} BINARIES MATCHED!! **'.format(
-          match_count))
+      self.logger.LogOutput(
+          '** COOL, ALL {0} BINARIES MATCHED!! **'.format(match_count))
       return True
 
     mismatch_str = 'Found {0} mismatch:\n'.format(len(mismatch_list))
@@ -252,24 +260,44 @@
 
   parser = argparse.ArgumentParser()
   parser.add_argument(
-      '--no_unmount', action='store_true', dest='no_unmount', default=False,
+      '--no_unmount',
+      action='store_true',
+      dest='no_unmount',
+      default=False,
       help='Do not unmount after finish, this is useful for debugging.')
   parser.add_argument(
-      '--chromeos_root', dest='chromeos_root', default=None, action='store',
+      '--chromeos_root',
+      dest='chromeos_root',
+      default=None,
+      action='store',
       help=('[Optional] Specify a chromeos tree instead of '
             'deducing it from image path so that we can compare '
             '2 images that are downloaded.'))
   parser.add_argument(
-      '--mount_basename', dest='mount_basename', default=None, action='store',
+      '--mount_basename',
+      dest='mount_basename',
+      default=None,
+      action='store',
       help=('Specify a meaningful name for the mount point. With this being '
             'set, the mount points would be "/tmp/mount_basename.x.rootfs" '
             ' and "/tmp/mount_basename.x.stateful". (x is 1 or 2).'))
-  parser.add_argument('--diff_file', dest='diff_file', default=None,
-                      help='Dumping all the diffs (if any) to the diff file')
-  parser.add_argument('--image1', dest='image1', default=None,
-                      required=True, help=('Image 1 file name.'))
-  parser.add_argument('--image2', dest='image2', default=None,
-                      required=True, help=('Image 2 file name.'))
+  parser.add_argument(
+      '--diff_file',
+      dest='diff_file',
+      default=None,
+      help='Dump all the diffs (if any) to the diff file')
+  parser.add_argument(
+      '--image1',
+      dest='image1',
+      default=None,
+      required=True,
+      help=('Image 1 file name.'))
+  parser.add_argument(
+      '--image2',
+      dest='image2',
+      default=None,
+      required=True,
+      help=('Image 2 file name.'))
   options = parser.parse_args(argv[1:])
 
   if options.mount_basename and options.mount_basename.find('/') >= 0:
diff --git a/command_executer_timeout_test.py b/command_executer_timeout_test.py
index ba0207e..26f3933 100755
--- a/command_executer_timeout_test.py
+++ b/command_executer_timeout_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Timeout test for command_executer."""
diff --git a/cros_login.py b/cros_login.py
index 32dfcb7..06ff8ff 100755
--- a/cros_login.py
+++ b/cros_login.py
@@ -1,9 +1,8 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010~2015 Google Inc. All Rights Reserved.
-"""Script to get past the login screen of ChromeOS.
+"""Script to get past the login screen of ChromeOS."""
 
-"""
 from __future__ import print_function
 
 import argparse
@@ -82,15 +81,17 @@
 
   with open(host_login_script, 'w') as f:
     f.write(full_login_script_contents)
-  ce.CopyFiles(host_login_script,
-               device_login_script,
-               dest_machine=remote,
-               chromeos_root=chromeos_root,
-               recursive=False,
-               dest_cros=True)
-  ret = ce.CrosRunCommand('python %s' % device_login_script,
-                          chromeos_root=chromeos_root,
-                          machine=remote)
+  ce.CopyFiles(
+      host_login_script,
+      device_login_script,
+      dest_machine=remote,
+      chromeos_root=chromeos_root,
+      recursive=False,
+      dest_cros=True)
+  ret = ce.CrosRunCommand(
+      'python %s' % device_login_script,
+      chromeos_root=chromeos_root,
+      machine=remote)
   if os.path.exists(host_login_script):
     os.remove(host_login_script)
   return ret
@@ -99,14 +100,10 @@
 def Main(argv):
   """The main function."""
   parser = argparse.ArgumentParser()
-  parser.add_argument('-r',
-                      '--remote',
-                      dest='remote',
-                      help='The remote ChromeOS box.')
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      help='The ChromeOS root.')
+  parser.add_argument(
+      '-r', '--remote', dest='remote', help='The remote ChromeOS box.')
+  parser.add_argument(
+      '-c', '--chromeos_root', dest='chromeos_root', help='The ChromeOS root.')
 
   options = parser.parse_args(argv)
 
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index d140355..f89bb71 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -8,6 +8,7 @@
 import base64
 import json
 import os
+import re
 import time
 import urllib2
 
@@ -91,8 +92,14 @@
   point.)
   """
   for build_log in build_info:
-    if description in build_log['reason']:
-      return build_log
+    property_list = build_log['properties']
+    for prop in property_list:
+      if len(prop) < 2:
+        continue
+      pname = prop[0]
+      pvalue = prop[1]
+      if pname == 'name' and pvalue == description:
+        return build_log
   return {}
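A minimal sketch of the new lookup, assuming each properties entry is a [name, value, ...] list as the len/index checks above suggest; the description and property values here are invented:

build_info = [
    {'properties': [['buildnumber', 1042, 'Build'],
                    ['name', 'toolchain-llvm-try', 'cbuildbot']]},
]

def find_build(build_info, description):
  # Scan each build's property entries for a 'name' property whose value
  # equals the remote description; fall back to an empty dict.
  for build_log in build_info:
    for prop in build_log['properties']:
      if len(prop) >= 2 and prop[0] == 'name' and prop[1] == description:
        return build_log
  return {}

print(find_build(build_info, 'toolchain-llvm-try'))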
 
 
@@ -237,10 +244,9 @@
   if not patch_arg:
     command_prefix = 'yes | '
   command = ('%s ./cbuildbot --remote --nochromesdk %s'
-             ' --remote-description=%s %s %s %s' % (command_prefix,
-                                                    optional_flags, description,
-                                                    toolchain_flags, patch_arg,
-                                                    build))
+             ' --remote-description=%s %s %s %s' %
+             (command_prefix, optional_flags, description, toolchain_flags,
+              patch_arg, build))
   _, out, _ = ce.RunCommandWOutput(command)
   if 'Tryjob submitted!' not in out:
     logger.GetLogger().LogFatal('Error occurred while launching trybot job: '
@@ -269,8 +275,8 @@
     build_info = GetBuildInfo(base_dir, build)
     if not build_info:
       if pending_time > TIME_OUT:
-        logger.GetLogger().LogFatal('Unable to get build logs for target %s.' %
-                                    build)
+        logger.GetLogger().LogFatal(
+            'Unable to get build logs for target %s.' % build)
       else:
         pending_message = 'Unable to find build log; job may be pending.'
         done = False
@@ -317,8 +323,8 @@
                                      (pending_time / 60))
         pending_time += SLEEP_TIME
       else:
-        logger.GetLogger().LogOutput('{0} minutes passed.'.format(running_time /
-                                                                  60))
+        logger.GetLogger().LogOutput(
+            '{0} minutes passed.'.format(running_time / 60))
         logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
         running_time += SLEEP_TIME
 
@@ -340,8 +346,8 @@
       trybot_image = FindArchiveImage(chromeos_root, build, build_id)
   if not trybot_image:
     logger.GetLogger().LogError('Trybot job %s failed with status %d;'
-                                ' no trybot image generated.' %
-                                (description, build_status))
+                                ' no trybot image generated.' % (description,
+                                                                 build_status))
 
   logger.GetLogger().LogOutput("trybot_image is '%s'" % trybot_image)
   logger.GetLogger().LogOutput('build_status is %d' % build_status)
@@ -375,11 +381,30 @@
   while elapsed_time < TIME_OUT:
     if DoesImageExist(chromeos_root, build):
       return
-    logger.GetLogger().LogOutput('Image %s not ready, waiting for 10 minutes' %
-                                 build)
+    logger.GetLogger().LogOutput(
+        'Image %s not ready, waiting for 10 minutes' % build)
     time.sleep(SLEEP_TIME)
     elapsed_time += SLEEP_TIME
 
   logger.GetLogger().LogOutput('Image %s not found, waited for %d hours' %
                                (build, (TIME_OUT / 3600)))
   raise BuildbotTimeout('Timeout while waiting for image %s' % build)
+
+
+def GetLatestImage(chromeos_root, path):
+  """Get latest image"""
+
+  fmt = re.compile(r'R([0-9]+)-([0-9]+)\.([0-9]+)\.([0-9]+)')
+
+  ce = command_executer.GetCommandExecuter()
+  command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
+  _, out, _ = ce.ChrootRunCommandWOutput(
+      chromeos_root, command, print_to_console=False)
+  candidates = [l.split('/')[-2] for l in out.split()]
+  candidates = map(fmt.match, candidates)
+  candidates = [[int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m]
+  candidates.sort(reverse=True)
+  for c in candidates:
+    build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
+    if DoesImageExist(chromeos_root, build):
+      return build
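
For reference, a minimal standalone sketch (not part of the patch) of the version-sorting logic GetLatestImage relies on. The directory names below are made up for illustration; in the real function they come from running gsutil ls on gs://chromeos-image-archive/<path>.

  import re

  names = ['R55-8845.0.0', 'R56-9000.1.2', 'R56-9000.10.0', 'not-a-build']
  fmt = re.compile(r'R([0-9]+)-([0-9]+)\.([0-9]+)\.([0-9]+)')
  matches = [fmt.match(n) for n in names]
  # Keep only names that parse, as lists of ints so the sort is numeric,
  # not lexicographic (10 sorts above 9, unlike the strings '10' and '9').
  versions = [[int(g) for g in m.group(1, 2, 3, 4)] for m in matches if m]
  versions.sort(reverse=True)
  print(versions[0])  # [56, 9000, 10, 0], i.e. R56-9000.10.0 is newest
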
diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py
index c561451..ae1b296 100644
--- a/cros_utils/command_executer.py
+++ b/cros_utils/command_executer.py
@@ -98,11 +98,13 @@
     # In this way the child cannot mess the parent's terminal.
     p = None
     try:
-      p = subprocess.Popen(cmd,
-                           stdout=subprocess.PIPE,
-                           stderr=subprocess.PIPE,
-                           shell=True,
-                           preexec_fn=os.setsid)
+      p = subprocess.Popen(
+          cmd,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE,
+          shell=True,
+          preexec_fn=os.setsid,
+          executable='/bin/bash')
 
       full_stdout = ''
       full_stderr = ''
@@ -156,8 +158,7 @@
             if self.logger:
               self.logger.LogWarning('Timeout of %s seconds reached since '
                                      'process termination.' %
-                                     terminated_timeout,
-                                     print_to_console)
+                                     terminated_timeout, print_to_console)
             break
 
         if (command_timeout is not None and
@@ -277,14 +278,15 @@
 
     # Write all commands to a file.
     command_file = self.WriteToTempShFile(cmd)
-    retval = self.CopyFiles(command_file,
-                            command_file,
-                            dest_machine=machine,
-                            command_terminator=command_terminator,
-                            chromeos_root=chromeos_root,
-                            dest_cros=True,
-                            recursive=False,
-                            print_to_console=print_to_console)
+    retval = self.CopyFiles(
+        command_file,
+        command_file,
+        dest_machine=machine,
+        command_terminator=command_terminator,
+        chromeos_root=chromeos_root,
+        dest_cros=True,
+        recursive=False,
+        print_to_console=print_to_console)
     if retval:
       if self.logger:
         self.logger.LogError('Could not run remote command on machine.'
@@ -294,12 +296,13 @@
     command = self.RemoteAccessInitCommand(chromeos_root, machine)
     command += '\nremote_sh bash %s' % command_file
     command += "\nl_retval=$?; echo \"$REMOTE_OUT\"; exit $l_retval"
-    retval = self.RunCommandGeneric(command,
-                                    return_output,
-                                    command_terminator=command_terminator,
-                                    command_timeout=command_timeout,
-                                    terminated_timeout=terminated_timeout,
-                                    print_to_console=print_to_console)
+    retval = self.RunCommandGeneric(
+        command,
+        return_output,
+        command_terminator=command_terminator,
+        command_timeout=command_timeout,
+        terminated_timeout=terminated_timeout,
+        print_to_console=print_to_console)
     if return_output:
       connect_signature = (
           'Initiating first contact with remote host\n' + 'Connection OK\n')
@@ -368,8 +371,8 @@
     # the chroot already exists. We want the final returned output to skip
     # the output from chroot creation steps.
     if return_output:
-      ret = self.RunCommand('cd %s; cros_sdk %s -- true' %
-                            (chromeos_root, cros_sdk_options))
+      ret = self.RunCommand('cd %s; cros_sdk %s -- true' % (chromeos_root,
+                                                            cros_sdk_options))
       if ret:
         return (ret, '', '')
 
@@ -378,12 +381,13 @@
     command = ("cd %s; cros_sdk %s -- bash -c '%s/%s'" %
                (chromeos_root, cros_sdk_options, misc.CHROMEOS_SCRIPTS_DIR,
                 os.path.basename(command_file)))
-    ret = self.RunCommandGeneric(command,
-                                 return_output,
-                                 command_terminator=command_terminator,
-                                 command_timeout=command_timeout,
-                                 terminated_timeout=terminated_timeout,
-                                 print_to_console=print_to_console)
+    ret = self.RunCommandGeneric(
+        command,
+        return_output,
+        command_terminator=command_terminator,
+        command_timeout=command_timeout,
+        terminated_timeout=terminated_timeout,
+        print_to_console=print_to_console)
     os.remove(command_file)
     return ret
 
@@ -419,10 +423,11 @@
                   username=None,
                   command_terminator=None):
     cmd = ' ;\n'.join(cmdlist)
-    return self.RunCommand(cmd,
-                           machine=machine,
-                           username=username,
-                           command_terminator=command_terminator)
+    return self.RunCommand(
+        cmd,
+        machine=machine,
+        username=username,
+        command_terminator=command_terminator)
 
   def CopyFiles(self,
                 src,
@@ -464,18 +469,20 @@
       rsync_prefix = "\nrsync -r -e \"%s\" " % ssh_command
       if dest_cros == True:
         command += rsync_prefix + '%s root@%s:%s' % (src, dest_machine, dest)
-        return self.RunCommand(command,
-                               machine=src_machine,
-                               username=src_user,
-                               command_terminator=command_terminator,
-                               print_to_console=print_to_console)
+        return self.RunCommand(
+            command,
+            machine=src_machine,
+            username=src_user,
+            command_terminator=command_terminator,
+            print_to_console=print_to_console)
       else:
         command += rsync_prefix + 'root@%s:%s %s' % (src_machine, src, dest)
-        return self.RunCommand(command,
-                               machine=dest_machine,
-                               username=dest_user,
-                               command_terminator=command_terminator,
-                               print_to_console=print_to_console)
+        return self.RunCommand(
+            command,
+            machine=dest_machine,
+            username=dest_user,
+            command_terminator=command_terminator,
+            print_to_console=print_to_console)
 
     if dest_machine == src_machine:
       command = 'rsync -a %s %s' % (src, dest)
@@ -484,11 +491,12 @@
         src_machine = os.uname()[1]
         src_user = getpass.getuser()
       command = 'rsync -a %s@%s:%s %s' % (src_user, src_machine, src, dest)
-    return self.RunCommand(command,
-                           machine=dest_machine,
-                           username=dest_user,
-                           command_terminator=command_terminator,
-                           print_to_console=print_to_console)
+    return self.RunCommand(
+        command,
+        machine=dest_machine,
+        username=dest_user,
+        command_terminator=command_terminator,
+        print_to_console=print_to_console)
 
   def RunCommand2(self,
                   cmd,
@@ -557,9 +565,8 @@
       def notify_line(self):
         p = self._buf.find('\n')
         while p >= 0:
-          self._line_consumer(line=self._buf[:p + 1],
-                              output=self._name,
-                              pobject=self._pobject)
+          self._line_consumer(
+              line=self._buf[:p + 1], output=self._name, pobject=self._pobject)
           if p < len(self._buf) - 1:
             self._buf = self._buf[p + 1:]
             p = self._buf.find('\n')
@@ -571,9 +578,8 @@
       def notify_eos(self):
         # Notify end of stream. The last line may not end with a '\n'.
         if self._buf != '':
-          self._line_consumer(line=self._buf,
-                              output=self._name,
-                              pobject=self._pobject)
+          self._line_consumer(
+              line=self._buf, output=self._name, pobject=self._pobject)
           self._buf = ''
 
     if self.log_level == 'verbose':
@@ -605,15 +611,13 @@
       poll = select.poll()
       outfd = pobject.stdout.fileno()
       poll.register(outfd, select.POLLIN | select.POLLPRI)
-      handlermap = {outfd:
-                    StreamHandler(pobject, outfd, 'stdout', line_consumer)}
+      handlermap = {
+          outfd: StreamHandler(pobject, outfd, 'stdout', line_consumer)
+      }
       if not join_stderr:
         errfd = pobject.stderr.fileno()
-        poll.register(errfd,
-                      select.POLLIN | select.POLLPRI)
-        handlermap[errfd] = StreamHandler(pobject,
-                                          errfd,
-                                          'stderr',
+        poll.register(errfd, select.POLLIN | select.POLLPRI)
+        handlermap[errfd] = StreamHandler(pobject, errfd, 'stderr',
                                           line_consumer)
       while len(handlermap):
         readables = poll.poll(300)
diff --git a/cros_utils/manifest_versions.py b/cros_utils/manifest_versions.py
index 52fd700..47e2fb2 100644
--- a/cros_utils/manifest_versions.py
+++ b/cros_utils/manifest_versions.py
@@ -7,6 +7,7 @@
 
 __author__ = 'llozano@google.com (Luis Lozano)'
 
+import copy
 import os
 import re
 import shutil
@@ -48,8 +49,10 @@
     else:
       versions_git = (
           'https://chromium.googlesource.com/chromiumos/manifest-versions.git')
-    commands = ['cd {0}'.format(self.clone_location),
-                'git clone {0}'.format(versions_git)]
+    commands = [
+        'cd {0}'.format(self.clone_location),
+        'git clone {0}'.format(versions_git)
+    ]
     ret = self.ce.RunCommands(commands)
     if ret:
       logger.GetLogger().LogFatal('Failed to clone manifest-versions.')
@@ -58,26 +61,78 @@
     if self.clone_location:
       shutil.rmtree(self.clone_location)
 
+  def TimeToVersionChromeOS(self, my_time):
+    """Convert timestamp to version number, in ChromeOS/Paladin."""
+    cur_time = time.mktime(time.gmtime())
+    des_time = float(my_time)
+    if cur_time - des_time > 7000000:
+      logger.GetLogger().LogFatal('The time you specify is too early.')
+    commands = [
+        'cd {0}'.format(self.clone_location), 'cd manifest-versions',
+        'git checkout -f $(git rev-list' +
+        ' --max-count=1 --before={0} origin/master)'.format(my_time)
+    ]
+    ret = self.ce.RunCommands(commands)
+    if ret:
+      logger.GetLogger().LogFatal('Failed to checkout manifest at '
+                                  'specified time')
+    path = os.path.realpath(
+        '{0}/manifest-versions/LKGM/lkgm.xml'.format(self.clone_location))
+    pp = path.split('/')
+    new_list = copy.deepcopy(pp)
+    for i, e in enumerate(pp):
+      if e == 'android-LKGM-candidates':
+        new_list[i] = 'paladin'
+    chrome_path = '/'.join(new_list)
+    if not os.path.exists(chrome_path):
+      logger.GetLogger().LogOutput('LKGM path is %s' % path)
+      logger.GetLogger().LogOutput('Cannot find path %s' % chrome_path)
+      pieces = os.path.basename(chrome_path).split('.')
+      pieces = pieces[:-2]
+      new_base = '.'.join(pieces) + '*'
+      wild_path = os.path.join('/', '/'.join(new_list[:-1]), new_base)
+      command = 'ls %s' % wild_path
+      ret, out, _ = self.ce.RunCommandWOutput(command)
+      if ret != 0:
+        logger.GetLogger().LogFatal('Cannot find manifest %s' % wild_path)
+      files = out.strip().split('\n')
+      latest = files[-1]
+      small = os.path.basename(latest).split('.xml')[0]
+      version = pp[-2] + '.' + small
+    else:
+      small = os.path.basename(path).split('.xml')[0]
+      version = pp[-2] + '.' + small
+    commands = [
+        'cd {0}'.format(self.clone_location), 'cd manifest-versions',
+        'git checkout master'
+    ]
+    self.ce.RunCommands(commands)
+    return version
+
   def TimeToVersion(self, my_time):
     """Convert timestamp to version number."""
     cur_time = time.mktime(time.gmtime())
     des_time = float(my_time)
     if cur_time - des_time > 7000000:
       logger.GetLogger().LogFatal('The time you specify is too early.')
-    commands = ['cd {0}'.format(self.clone_location), 'cd manifest-versions',
-                'git checkout -f $(git rev-list' +
-                ' --max-count=1 --before={0} origin/master)'.format(my_time)]
+    commands = [
+        'cd {0}'.format(self.clone_location), 'cd manifest-versions',
+        'git checkout -f $(git rev-list' +
+        ' --max-count=1 --before={0} origin/master)'.format(my_time)
+    ]
     ret = self.ce.RunCommands(commands)
     if ret:
       logger.GetLogger().LogFatal('Failed to checkout manifest at '
                                   'specified time')
-    path = os.path.realpath('{0}/manifest-versions/LKGM/lkgm.xml'.format(
-        self.clone_location))
+    path = os.path.realpath(
+        '{0}/manifest-versions/LKGM/lkgm.xml'.format(self.clone_location))
     pp = path.split('/')
     small = os.path.basename(path).split('.xml')[0]
     version = pp[-2] + '.' + small
-    commands = ['cd {0}'.format(self.clone_location), 'cd manifest-versions',
-                'git checkout master']
+    commands = [
+        'cd {0}'.format(self.clone_location), 'cd manifest-versions',
+        'git checkout master'
+    ]
     self.ce.RunCommands(commands)
     return version
 
@@ -86,8 +141,10 @@
     assert not IsRFormatCrosVersion(version)
     version = version.split('.', 1)[1]
     os.chdir(self.clone_location)
-    files = [os.path.join(r, f)
-             for r, _, fs in os.walk('.') for f in fs if version in f]
+    files = [
+        os.path.join(r, f) for r, _, fs in os.walk('.') for f in fs
+        if version in f
+    ]
     if files:
       command = 'cp {0} {1}'.format(files[0], to_file)
       ret = self.ce.RunCommand(command)
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index 6c7d290..939ed66 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -66,7 +66,11 @@
 
 
 def GetFilenameFromString(string):
-  return ApplySubs(string, (r'/', '__'), (r'\s', '_'), (r'[\\$="?^]', ''),)
+  return ApplySubs(
+      string,
+      (r'/', '__'),
+      (r'\s', '_'),
+      (r'[\\$="?^]', ''),)
 
 
 def GetRoot(scr_name):
@@ -143,16 +147,16 @@
     withdebug_flag = '--nowithdebug'
   return ('%s/build_packages %s --withdev --withtest --withautotest '
           '--skip_toolchain_update %s --board=%s '
-          '--accept_licenses=@CHROMEOS' %
-          (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board))
+          '--accept_licenses=@CHROMEOS' % (CHROMEOS_SCRIPTS_DIR, usepkg_flag,
+                                           withdebug_flag, board))
 
 
 def GetBuildImageCommand(board, dev=False):
   dev_args = ''
   if dev:
     dev_args = '--noenable_rootfs_verification --disk_layout=2gb-rootfs'
-  return ('%s/build_image --board=%s %s test' %
-          (CHROMEOS_SCRIPTS_DIR, board, dev_args))
+  return ('%s/build_image --board=%s %s test' % (CHROMEOS_SCRIPTS_DIR, board,
+                                                 dev_args))
 
 
 def GetSetupBoardCommand(board,
@@ -179,8 +183,8 @@
 
   options.append('--accept_licenses=@CHROMEOS')
 
-  return ('%s/setup_board --board=%s %s' %
-          (CHROMEOS_SCRIPTS_DIR, board, ' '.join(options)))
+  return ('%s/setup_board --board=%s %s' % (CHROMEOS_SCRIPTS_DIR, board,
+                                            ' '.join(options)))
 
 
 def CanonicalizePath(path):
@@ -192,8 +196,8 @@
 def GetCtargetFromBoard(board, chromeos_root):
   """Get Ctarget from board."""
   base_board = board.split('_')[0]
-  command = ('source %s; get_ctarget_from_board %s' %
-             (TOOLCHAIN_UTILS_PATH, base_board))
+  command = ('source %s; get_ctarget_from_board %s' % (TOOLCHAIN_UTILS_PATH,
+                                                       base_board))
   ce = command_executer.GetCommandExecuter()
   ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
   if ret != 0:
@@ -206,8 +210,8 @@
 def GetArchFromBoard(board, chromeos_root):
   """Get Arch from board."""
   base_board = board.split('_')[0]
-  command = ('source %s; get_board_arch %s' %
-             (TOOLCHAIN_UTILS_PATH, base_board))
+  command = ('source %s; get_board_arch %s' % (TOOLCHAIN_UTILS_PATH,
+                                               base_board))
   ce = command_executer.GetCommandExecuter()
   ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
   if ret != 0:
@@ -318,16 +322,14 @@
   command = 'cd {0} && git diff --quiet --cached --exit-code HEAD'.format(
       git_dir)
   return command_executer.GetCommandExecuter().RunCommand(
-      command,
-      print_to_console=False)
+      command, print_to_console=False)
 
 
 def HasGitUnstagedChanges(git_dir):
   """Return True if git repository has un-staged changes."""
   command = 'cd {0} && git diff --quiet --exit-code HEAD'.format(git_dir)
   return command_executer.GetCommandExecuter().RunCommand(
-      command,
-      print_to_console=False)
+      command, print_to_console=False)
 
 
 def HasGitUntrackedChanges(git_dir):
@@ -335,8 +337,7 @@
   command = ('cd {0} && test -z '
              '$(git ls-files --exclude-standard --others)').format(git_dir)
   return command_executer.GetCommandExecuter().RunCommand(
-      command,
-      print_to_console=False)
+      command, print_to_console=False)
 
 
 def GitGetCommitHash(git_dir, commit_symbolic_name):
@@ -357,8 +358,7 @@
   command = ('cd {0} && git log -n 1 --pretty="format:%H" {1}').format(
       git_dir, commit_symbolic_name)
   rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
-      command,
-      print_to_console=False)
+      command, print_to_console=False)
   if rv == 0:
     return out.strip()
   return None
@@ -402,8 +402,7 @@
   if path:
     command += ' -- ' + path
   _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
-      command,
-      print_to_console=False)
+      command, print_to_console=False)
   rv = []
   for line in out.splitlines():
     rv.append(line)
@@ -411,8 +410,8 @@
 
 
 def IsChromeOsTree(chromeos_root):
-  return (os.path.isdir(os.path.join(chromeos_root,
-                                     'src/third_party/chromiumos-overlay')) and
+  return (os.path.isdir(
+      os.path.join(chromeos_root, 'src/third_party/chromiumos-overlay')) and
           os.path.isdir(os.path.join(chromeos_root, 'manifest')))
 
 
@@ -436,25 +435,22 @@
     print(cmd0)
   else:
     if command_executer.GetCommandExecuter().RunCommand(
-        cmd0,
-        print_to_console=True) != 0:
+        cmd0, print_to_console=True) != 0:
       return False
 
   cmd1 = ('export CHROMEOSDIRNAME="$(dirname $(cd {0} && pwd))" && '
           'export CHROMEOSBASENAME="$(basename $(cd {0} && pwd))" && '
-          'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME').format(
-              chromeos_root)
+          'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME'
+         ).format(chromeos_root)
   if dry_run:
     print(cmd1)
     return True
 
   return command_executer.GetCommandExecuter().RunCommand(
-      cmd1,
-      print_to_console=True) == 0
+      cmd1, print_to_console=True) == 0
 
 
-def ApplyGerritPatches(chromeos_root,
-                       gerrit_patch_string,
+def ApplyGerritPatches(chromeos_root, gerrit_patch_string,
                        branch='cros/master'):
   """Apply gerrit patches on a chromeos tree.
 
@@ -491,8 +487,8 @@
     pi_str = '{project}:{ref}'.format(project=pi.project, ref=pi.ref)
     try:
       project_git_path = project_checkout.GetPath(absolute=True)
-      logger.GetLogger().LogOutput('Applying patch "{0}" in "{1}" ...'.format(
-          pi_str, project_git_path))
+      logger.GetLogger().LogOutput(
+          'Applying patch "{0}" in "{1}" ...'.format(pi_str, project_git_path))
       pi.Apply(project_git_path, branch, trivial=False)
     except Exception:
       traceback.print_exc(file=sys.stdout)
@@ -521,8 +517,8 @@
   true_value, false_value = true_value.lower(), false_value.lower()
   true_text, false_text = true_value, false_value
   if true_value == false_value:
-    raise ValueError('true_value and false_value must differ: got %r' %
-                     true_value)
+    raise ValueError(
+        'true_value and false_value must differ: got %r' % true_value)
 
   if default:
     true_text = true_text[0].upper() + true_text[1:]
@@ -556,14 +552,16 @@
     elif false_value.startswith(response):
       return False
 
-def rgb2short(r, g, b):
-  """  Converts RGB values to xterm-256 color. """
 
-  redcolor = [255, 124, 160, 196, 9 ]
-  greencolor = [255, 118, 82, 46, 10 ]
+# pylint: disable=unused-argument
+def rgb2short(r, g, b):
+  """Converts RGB values to xterm-256 color."""
+
+  redcolor = [255, 124, 160, 196, 9]
+  greencolor = [255, 118, 82, 46, 10]
 
   if g == 0:
-    return redcolor[r/52]
+    return redcolor[r / 52]
   if r == 0:
-    return greencolor[g/52]
+    return greencolor[g / 52]
   return 4
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 98f126b..6936d35 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -57,7 +57,6 @@
   cell_table = tf.GetCellTable()
   tp = TablePrinter(cell_table, out_to)
   print tp.Print()
-
 """
 
 from __future__ import print_function
@@ -464,12 +463,13 @@
     # --texture_upload_count--texture_upload_count--count (high is good)
     # --total_deferred_image_decode_count--count (low is good)
     # --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
-    lower_is_better_keys = ['milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes',
-                            'wrbytes', 'dropped_percent', '(ms)', '(seconds)',
-                            '--ms', '--average_num_missing_tiles',
-                            '--experimental_jank', '--experimental_mean_frame',
-                            '--experimental_median_frame_time',
-                            '--total_deferred_image_decode_count', '--seconds']
+    lower_is_better_keys = [
+        'milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes', 'wrbytes',
+        'dropped_percent', '(ms)', '(seconds)', '--ms',
+        '--average_num_missing_tiles', '--experimental_jank',
+        '--experimental_mean_frame', '--experimental_median_frame_time',
+        '--total_deferred_image_decode_count', '--seconds'
+    ]
 
     return any([l in key for l in lower_is_better_keys])
 
@@ -608,12 +608,13 @@
   def _ComputeFloat(self, cell):
     cell.string_value = '%0.2f' % float(cell.value)
     if float(cell.value) < 0.05:
-      cell.bgcolor = self._GetColor(cell.value,
-                                    Color(255, 255, 0, 0),
-                                    Color(255, 255, 255, 0),
-                                    Color(255, 255, 255, 0),
-                                    mid_value=0.05,
-                                    power=1)
+      cell.bgcolor = self._GetColor(
+          cell.value,
+          Color(255, 255, 0, 0),
+          Color(255, 255, 255, 0),
+          Color(255, 255, 255, 0),
+          mid_value=0.05,
+          power=1)
 
 
 class StorageFormat(Format):
@@ -647,12 +648,13 @@
 
   def _ComputeFloat(self, cell):
     cell.string_value = '%1.1f%%' % (float(cell.value) * 100)
-    cell.color = self._GetColor(cell.value,
-                                Color(0, 255, 0, 0),
-                                Color(0, 0, 0, 0),
-                                Color(255, 0, 0, 0),
-                                mid_value=0.02,
-                                power=1)
+    cell.color = self._GetColor(
+        cell.value,
+        Color(0, 255, 0, 0),
+        Color(0, 0, 0, 0),
+        Color(255, 0, 0, 0),
+        mid_value=0.02,
+        power=1)
 
 
 class PercentFormat(Format):
@@ -664,7 +666,8 @@
 
   def _ComputeFloat(self, cell):
     cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100)
-    cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+    cell.color = self._GetColor(cell.value,
+                                Color(255, 0, 0, 0),
                                 Color(0, 0, 0, 0), Color(0, 255, 0, 0))
 
 
@@ -677,7 +680,8 @@
 
   def _ComputeFloat(self, cell):
     cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100)
-    cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+    cell.color = self._GetColor(cell.value,
+                                Color(255, 0, 0, 0),
                                 Color(0, 0, 0, 0), Color(0, 255, 0, 0))
 
 
@@ -693,7 +697,8 @@
 
   def _ComputeFloat(self, cell):
     cell.string_value = '--'
-    bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0),
+    bgcolor = self._GetColor(cell.value,
+                             Color(255, 0, 0, 0),
                              Color(255, 255, 255, 0), Color(0, 255, 0, 0))
     cell.bgcolor = bgcolor
     cell.color = bgcolor
@@ -889,8 +894,8 @@
   def AddLabelName(self):
     """Put label on the top of the table."""
     top_header = []
-    base_colspan = len([c for c in self._columns if not c.result.NeedsBaseline()
-                       ])
+    base_colspan = len(
+        [c for c in self._columns if not c.result.NeedsBaseline()])
     compare_colspan = len(self._columns)
     # Find the row with the key 'retval', if it exists.  This
     # will be used to calculate the number of iterations that passed and
@@ -1179,14 +1184,17 @@
   """
   tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
   table = tg.GetTable()
-  columns = [Column(LiteralResult(), Format(), 'Literal'),
-             Column(AmeanResult(), Format()), Column(StdResult(), Format()),
-             Column(CoeffVarResult(), CoeffVarFormat()),
-             Column(NonEmptyCountResult(), Format()),
-             Column(AmeanRatioResult(), PercentFormat()),
-             Column(AmeanRatioResult(), RatioFormat()),
-             Column(GmeanRatioResult(), RatioFormat()),
-             Column(PValueResult(), PValueFormat())]
+  columns = [
+      Column(LiteralResult(), Format(), 'Literal'), Column(
+          AmeanResult(), Format()), Column(StdResult(), Format()), Column(
+              CoeffVarResult(), CoeffVarFormat()), Column(
+                  NonEmptyCountResult(), Format()),
+      Column(AmeanRatioResult(), PercentFormat()), Column(
+          AmeanRatioResult(), RatioFormat()), Column(GmeanRatioResult(),
+                                                     RatioFormat()), Column(
+                                                         PValueResult(),
+                                                         PValueFormat())
+  ]
   tf = TableFormatter(table, columns)
   cell_table = tf.GetCellTable()
   tp = TablePrinter(cell_table, out_to)
@@ -1195,38 +1203,55 @@
 
 if __name__ == '__main__':
   # Run a few small tests here.
-  runs = [[{'k1': '10',
-            'k2': '12',
-            'k5': '40',
-            'k6': '40',
-            'ms_1': '20',
-            'k7': 'FAIL',
-            'k8': 'PASS',
-            'k9': 'PASS',
-            'k10': '0'}, {'k1': '13',
-                          'k2': '14',
-                          'k3': '15',
-                          'ms_1': '10',
-                          'k8': 'PASS',
-                          'k9': 'FAIL',
-                          'k10': '0'}], [{'k1': '50',
-                                          'k2': '51',
-                                          'k3': '52',
-                                          'k4': '53',
-                                          'k5': '35',
-                                          'k6': '45',
-                                          'ms_1': '200',
-                                          'ms_2': '20',
-                                          'k7': 'FAIL',
-                                          'k8': 'PASS',
-                                          'k9': 'PASS'}]]
+  runs = [[{
+      'k1': '10',
+      'k2': '12',
+      'k5': '40',
+      'k6': '40',
+      'ms_1': '20',
+      'k7': 'FAIL',
+      'k8': 'PASS',
+      'k9': 'PASS',
+      'k10': '0'
+  }, {
+      'k1': '13',
+      'k2': '14',
+      'k3': '15',
+      'ms_1': '10',
+      'k8': 'PASS',
+      'k9': 'FAIL',
+      'k10': '0'
+  }], [{
+      'k1': '50',
+      'k2': '51',
+      'k3': '52',
+      'k4': '53',
+      'k5': '35',
+      'k6': '45',
+      'ms_1': '200',
+      'ms_2': '20',
+      'k7': 'FAIL',
+      'k8': 'PASS',
+      'k9': 'PASS'
+  }]]
   labels = ['vanilla', 'modified']
   t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
   print(t)
   email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
 
-  runs = [[{'k1': '1'}, {'k1': '1.1'}, {'k1': '1.2'}],
-          [{'k1': '5'}, {'k1': '5.1'}, {'k1': '5.2'}]]
+  runs = [[{
+      'k1': '1'
+  }, {
+      'k1': '1.1'
+  }, {
+      'k1': '1.2'
+  }], [{
+      'k1': '5'
+  }, {
+      'k1': '5.1'
+  }, {
+      'k1': '5.2'
+  }]]
   t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
   print(t)
 
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index a2a34bc..bbb1cdf 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,9 +1,40 @@
-
 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """Define a type that wraps a Benchmark instance."""
+from __future__ import print_function
+
+import math
+from scipy import stats
+
+# See crbug.com/673558 for how these are estimated.
+_estimated_stddev = {
+    'octane': 0.015,
+    'kraken': 0.019,
+    'speedometer': 0.007,
+    'dromaeo.domcoreattr': 0.023,
+    'dromaeo.domcoremodify': 0.011,
+    'smoothness.tough_webgl_cases': 0.025,
+    'graphics_WebGLAquarium': 0.008,
+    'page_cycler_v2.typical_25': 0.021,
+}
+
+
+# Get #samples needed to guarantee a given confidence interval, assuming the
+# samples follow normal distribution.
+def _samples(b):
+  # TODO: Make this an option
+  # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
+  p = 0.9
+  e = 0.02
+  if b not in _estimated_stddev:
+    return 1
+  d = _estimated_stddev[b]
+  # Get at least 2 samples so as to calculate standard deviation, which is
+  # needed in T-test for p-value.
+  n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2))
+  return n if n > 1 else 2
+
 
 class Benchmark(object):
   """Class representing a benchmark to be run.
@@ -31,7 +62,7 @@
     self.test_name = test_name
     #For telemetry, this is the data.
     self.test_args = test_args
-    self.iterations = iterations
+    self.iterations = iterations if iterations > 0 else _samples(name)
     self.perf_args = perf_args
     self.rm_chroot_tmp = rm_chroot_tmp
     self.iteration_adjusted = False
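
For reference, the arithmetic behind the new _samples() helper above: with confidence p and relative tolerance e, the required sample count is n = ceil((z * sigma / e)^2), where z = stats.norm.isf((1 - p) / 2) is the two-sided normal quantile and sigma is the benchmark's estimated relative standard deviation. A small standalone check (not part of the patch), using the defaults and a few entries from _estimated_stddev:

  import math
  from scipy import stats

  p, e = 0.9, 0.02                      # 90% confidence, 2% tolerance
  z = stats.norm.isf((1 - p) / 2)       # ~1.645
  for sigma in (0.015, 0.021, 0.025):   # octane, page_cycler_v2, tough_webgl
    n = int(math.ceil((z * sigma / e) ** 2))
    print(sigma, max(n, 2))             # -> 2, 3, 5 iterations respectively
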
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index e53187e..bba71a3 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,8 +1,6 @@
-
 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """Module of benchmark runs."""
 from __future__ import print_function
 
@@ -30,6 +28,7 @@
 
 class BenchmarkRun(threading.Thread):
   """The benchmarkrun class."""
+
   def __init__(self, name, benchmark, label, iteration, cache_conditions,
                machine_manager, logger_to_use, log_level, share_cache):
     threading.Thread.__init__(self)
@@ -53,8 +52,8 @@
     self.test_args = benchmark.test_args
     self.cache = None
     self.profiler_args = self.GetExtraAutotestArgs()
-    self._ce = command_executer.GetCommandExecuter(self._logger,
-                                                   log_level=self.log_level)
+    self._ce = command_executer.GetCommandExecuter(
+        self._logger, log_level=self.log_level)
     self.timeline = timeline.Timeline()
     self.timeline.Record(STATUS_PENDING)
     self.share_cache = share_cache
@@ -96,8 +95,7 @@
         err = 'No cache hit.'
         self.result = Result.CreateFromRun(
             self._logger, self.log_level, self.label, self.machine, output, err,
-            retval, self.benchmark.test_name,
-            self.benchmark.suite)
+            retval, self.benchmark.test_name, self.benchmark.suite)
 
       else:
         self._logger.LogOutput('%s: No cache hit.' % self.name)
@@ -141,8 +139,8 @@
         pass
       elif self.machine:
         if not self.machine.IsReachable():
-          self._logger.LogOutput('Machine %s is not reachable, removing it.' %
-                                 self.machine.name)
+          self._logger.LogOutput(
+              'Machine %s is not reachable, removing it.' % self.machine.name)
           self.machine_manager.RemoveMachine(self.machine.name)
         self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
         self.machine_manager.ReleaseMachine(self.machine)
@@ -190,8 +188,10 @@
       perf_args = ' '.join(perf_args_list)
       if not perf_args_list[0] in ['record', 'stat']:
         raise SyntaxError('perf_args must start with either record or stat')
-      extra_test_args = ['--profiler=custom_perf',
-                         ("--profiler_args='perf_options=\"%s\"'" % perf_args)]
+      extra_test_args = [
+          '--profiler=custom_perf',
+          ("--profiler_args='perf_options=\"%s\"'" % perf_args)
+      ]
       return ' '.join(extra_test_args)
     else:
       return ''
@@ -254,9 +254,9 @@
     self.timeline.Record(STATUS_IMAGING)
     self.machine_manager.ImageMachine(machine, self.label)
     self.timeline.Record(STATUS_RUNNING)
-    [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
-                                               self.benchmark, self.test_args,
-                                               self.profiler_args)
+    [retval, out,
+     err] = self.suite_runner.Run(machine.name, self.label, self.benchmark,
+                                  self.test_args, self.profiler_args)
     self.run_completed = True
     rr = MockResult('logger', self.label, self.log_level, machine)
     rr.out = out
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 9af66a3..74757ac 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -117,11 +117,10 @@
     pass
 
   def test_run(self):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     def MockLogOutput(msg, print_to_console=False):
       'Helper function for test_run.'
@@ -258,11 +257,10 @@
     self.assertEqual(self.status, ['FAILED'])
 
   def test_terminate_pass(self):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     def GetLastEventPassed():
       'Helper function for test_terminate_pass'
@@ -286,11 +284,10 @@
     self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
 
   def test_terminate_fail(self):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     def GetLastEventFailed():
       'Helper function for test_terminate_fail'
@@ -314,11 +311,10 @@
     self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
 
   def test_acquire_machine(self):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     br.terminated = True
     self.assertRaises(Exception, br.AcquireMachine)
@@ -332,11 +328,10 @@
     self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
 
   def test_get_extra_autotest_args(self):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     def MockLogError(err_msg):
       'Helper function for test_get_extra_autotest_args'
@@ -372,11 +367,10 @@
   @mock.patch.object(SuiteRunner, 'Run')
   @mock.patch.object(Result, 'CreateFromRun')
   def test_run_test(self, mock_result, mock_runner):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     self.status = []
 
@@ -391,9 +385,9 @@
     br.RunTest(mock_machine)
 
     self.assertTrue(br.run_completed)
-    self.assertEqual(
-        self.status,
-        [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
+    self.assertEqual(self.status, [
+        benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING
+    ])
 
     self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
     br.machine_manager.ImageMachine.assert_called_with(mock_machine,
@@ -403,17 +397,15 @@
                                    '', br.profiler_args)
 
     self.assertEqual(mock_result.call_count, 1)
-    mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
-                                   None, "{'Score':100}", '', 0,
-                                   'page_cycler.netsim.top_10',
-                                   'telemetry_Crosperf')
+    mock_result.assert_called_with(
+        self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
+        0, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
 
   def test_set_cache_conditions(self):
-    br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
-                                    self.test_label, 1,
-                                    self.test_cache_conditions,
-                                    self.mock_machine_manager, self.mock_logger,
-                                    'average', '')
+    br = benchmark_run.BenchmarkRun(
+        'test_run', self.test_benchmark, self.test_label, 1,
+        self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+        'average', '')
 
     phony_cache_conditions = [123, 456, True, False]
 
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 320ede6..24c364c 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -16,43 +16,47 @@
 
   def test_benchmark(self):
     # Test creating a benchmark with all the fields filled out.
-    b1 = Benchmark('b1_test',  # name
-                   'octane',  # test_name
-                   '',  # test_args
-                   3,  # iterations
-                   False,  # rm_chroot_tmp
-                   'record -e cycles',  # perf_args
-                   'telemetry_Crosperf',  # suite
-                   True)  # show_all_results
+    b1 = Benchmark(
+        'b1_test',  # name
+        'octane',  # test_name
+        '',  # test_args
+        3,  # iterations
+        False,  # rm_chroot_tmp
+        'record -e cycles',  # perf_args
+        'telemetry_Crosperf',  # suite
+        True)  # show_all_results
     self.assertTrue(b1.suite, 'telemetry_Crosperf')
 
     # Test creating a benchmark field with default fields left out.
-    b2 = Benchmark('b2_test',  # name
-                   'octane',  # test_name
-                   '',  # test_args
-                   3,  # iterations
-                   False,  # rm_chroot_tmp
-                   'record -e cycles')  # perf_args
+    b2 = Benchmark(
+        'b2_test',  # name
+        'octane',  # test_name
+        '',  # test_args
+        3,  # iterations
+        False,  # rm_chroot_tmp
+        'record -e cycles')  # perf_args
     self.assertEqual(b2.suite, '')
     self.assertFalse(b2.show_all_results)
 
     # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
     # and see what happens.
-    b3 = Benchmark('b3_test',  # name
-                   'octane',  # test_name
-                   '',  # test_args
-                   3,  # iterations
-                   False,  # rm_chroot_tmp
-                   'record -e cycles',  # perf_args
-                   'telemetry',  # suite
-                   False)  # show_all_results
+    b3 = Benchmark(
+        'b3_test',  # name
+        'octane',  # test_name
+        '',  # test_args
+        3,  # iterations
+        False,  # rm_chroot_tmp
+        'record -e cycles',  # perf_args
+        'telemetry',  # suite
+        False)  # show_all_results
     self.assertTrue(b3.show_all_results)
 
     # Check to see if the args to Benchmark have changed since the last time
     # this test was updated.
-    args_list = ['self', 'name', 'test_name', 'test_args', 'iterations',
-                 'rm_chroot_tmp', 'perf_args', 'suite', 'show_all_results',
-                 'retries', 'run_local']
+    args_list = [
+        'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
+        'perf_args', 'suite', 'show_all_results', 'retries', 'run_local'
+    ]
     arg_spec = inspect.getargspec(Benchmark.__init__)
     self.assertEqual(len(arg_spec.args), len(args_list))
     for arg in args_list:
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index 0a61eeb..34513a8 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -22,10 +22,11 @@
 def Main(argv):
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('--chromeos_root',
-                      default='/path/to/chromeos',
-                      dest='chromeos_root',
-                      help='ChromeOS root checkout directory')
+  parser.add_argument(
+      '--chromeos_root',
+      default='/path/to/chromeos',
+      dest='chromeos_root',
+      help='ChromeOS root checkout directory')
   parser.add_argument('remotes', nargs=argparse.REMAINDER)
 
   options = parser.parse_args(argv)
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 4a46896..b361f15 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -42,12 +42,13 @@
 
   def test_convert_options_to_settings(self):
     parser = argparse.ArgumentParser()
-    parser.add_argument('-l',
-                        '--log_dir',
-                        dest='log_dir',
-                        default='',
-                        help='The log_dir, default is under '
-                             '<crosperf_logs>/logs')
+    parser.add_argument(
+        '-l',
+        '--log_dir',
+        dest='log_dir',
+        default='',
+        help='The log_dir, default is under '
+        '<crosperf_logs>/logs')
     crosperf.SetupParserOptions(parser)
     argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
     options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8ceaa87..ad0a812 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -56,8 +56,8 @@
     # image name.
     command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
                "python translate_xbuddy.py '%s'" % xbuddy_label)
-    _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(chromeos_root,
-                                                                command)
+    _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
+        chromeos_root, command)
     if not build_id_tuple_str:
       raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
 
@@ -143,8 +143,8 @@
       cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
       status = self._ce.RunCommand(cmd)
     if status != 0:
-      raise MissingFile('Cannot find autotest package file: %s.' %
-                        package_file_name)
+      raise MissingFile(
+          'Cannot find autotest package file: %s.' % package_file_name)
 
     if self.log_level == 'average':
       self._logger.LogOutput('Preparing to download %s package to local '
@@ -171,8 +171,8 @@
                                    package_file_name, uncompress_cmd):
     # Uncompress file
     download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
-    command = ('cd %s ; %s %s' %
-               (download_path, uncompress_cmd, package_file_name))
+    command = ('cd %s ; %s %s' % (download_path, uncompress_cmd,
+                                  package_file_name))
 
     if self.log_level != 'verbose':
       self._logger.LogOutput('CMD: %s' % command)
@@ -193,8 +193,8 @@
   def VerifyAutotestFilesExist(self, chromeos_root, build_id, package_file):
     # Quickly verify if the files are there
     status = 0
-    gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
-                       (build_id, package_file))
+    gs_package_name = ('gs://chromeos-image-archive/%s/%s' % (build_id,
+                                                              package_file))
     gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
     if not test_flag.GetTestMode():
       cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
@@ -227,9 +227,9 @@
                                              autotest_packages_name)
       if status != 0:
         default_autotest_dir = '~/trunk/src/third_party/autotest/files'
-        print('(Warning: Could not find autotest packages .)\n'
-              '(Warning: Defaulting autotest path to %s .' %
-              default_autotest_dir)
+        print(
+            '(Warning: Could not find autotest packages .)\n'
+            '(Warning: Defaulting autotest path to %s .)' % default_autotest_dir)
         return default_autotest_dir
 
       # Files exist on server, download and uncompress them
@@ -242,12 +242,10 @@
 
       self.UncompressSingleAutotestFile(chromeos_root, build_id,
                                         autotest_packages_name, 'tar -xvf ')
-      self.UncompressSingleAutotestFile(chromeos_root, build_id,
-                                        autotest_server_package_name,
-                                        'tar -jxvf ')
-      self.UncompressSingleAutotestFile(chromeos_root, build_id,
-                                        autotest_control_files_name,
-                                        'tar -xvf ')
+      self.UncompressSingleAutotestFile(
+          chromeos_root, build_id, autotest_server_package_name, 'tar -jxvf ')
+      self.UncompressSingleAutotestFile(
+          chromeos_root, build_id, autotest_control_files_name, 'tar -xvf ')
       # Rename created autotest directory to autotest_files
       command = ('cd %s ; mv autotest autotest_files' % download_path)
       if self.log_level != 'verbose':
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 7a4f385..349a2db 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -126,8 +126,8 @@
     # 2nd arg must be exception handler
     except_handler_string = 'RunCommandExceptionHandler.HandleException'
     self.assertTrue(
-        except_handler_string in
-        repr(mock_cmd_exec.RunCommand.call_args_list[0][1]))
+        except_handler_string in repr(
+            mock_cmd_exec.RunCommand.call_args_list[0][1]))
 
     # Call 2, should have 2 arguments
     self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index dbcde21..987318a 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -126,10 +126,11 @@
           full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
           logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
                                         True)
-          benchmark_runs.append(benchmark_run.BenchmarkRun(
-              benchmark_run_name, benchmark, label, iteration,
-              self.cache_conditions, self.machine_manager, logger_to_use,
-              self.log_level, self.share_cache))
+          benchmark_runs.append(
+              benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
+                                         iteration, self.cache_conditions,
+                                         self.machine_manager, logger_to_use,
+                                         self.log_level, self.share_cache))
 
     return benchmark_runs
 
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 2278015..9d58048 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -78,11 +78,13 @@
     'power_Resume',
     'video_PlaybackPerf.h264',
     'build_RootFilesystemSize',
+]
+
 #    'cheets_AntutuTest',
 #    'cheets_PerfBootServer',
 #    'cheets_CandyCrushTest',
 #    'cheets_LinpackTest',
-]
+#]
 
 
 class ExperimentFactory(object):
@@ -98,10 +100,9 @@
                          show_all_results, retries, run_local):
     """Add all the tests in a set to the benchmarks list."""
     for test_name in benchmark_list:
-      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
-                                      iterations, rm_chroot_tmp, perf_args,
-                                      suite, show_all_results, retries,
-                                      run_local)
+      telemetry_benchmark = Benchmark(
+          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+          suite, show_all_results, retries, run_local)
       benchmarks.append(telemetry_benchmark)
 
   def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -210,20 +211,33 @@
           benchmarks.append(benchmark)
       else:
         if test_name == 'all_graphics_perf':
-          self.AppendBenchmarkSet(benchmarks,
-                                  graphics_perf_tests, '',
-                                  iterations, rm_chroot_tmp, perf_args, '',
-                                  show_all_results, retries, run_local=False)
+          self.AppendBenchmarkSet(
+              benchmarks,
+              graphics_perf_tests,
+              '',
+              iterations,
+              rm_chroot_tmp,
+              perf_args,
+              '',
+              show_all_results,
+              retries,
+              run_local=False)
         elif test_name == 'all_crosbolt_perf':
-          self.AppendBenchmarkSet(benchmarks,
-                                  telemetry_crosbolt_perf_tests, test_args,
-                                  iterations, rm_chroot_tmp, perf_args,
-                                  'telemetry_Crosperf', show_all_results,
-                                  retries, run_local)
-          self.AppendBenchmarkSet(benchmarks,
-                                  crosbolt_perf_tests, '',
-                                  iterations, rm_chroot_tmp, perf_args, '',
-                                  show_all_results, retries, run_local=False)
+          self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
+                                  test_args, iterations, rm_chroot_tmp,
+                                  perf_args, 'telemetry_Crosperf',
+                                  show_all_results, retries, run_local)
+          self.AppendBenchmarkSet(
+              benchmarks,
+              crosbolt_perf_tests,
+              '',
+              iterations,
+              rm_chroot_tmp,
+              perf_args,
+              '',
+              show_all_results,
+              retries,
+              run_local=False)
         else:
           # Add the single benchmark.
           benchmark = Benchmark(
@@ -265,11 +279,8 @@
         build = label_settings.GetField('build')
         if len(build) == 0:
           raise RuntimeError("Can not have empty 'build' field!")
-        image, autotest_path = label_settings.GetXbuddyPath(build,
-                                                            autotest_path,
-                                                            board,
-                                                            chromeos_root,
-                                                            log_level)
+        image, autotest_path = label_settings.GetXbuddyPath(
+            build, autotest_path, board, chromeos_root, log_level)
 
       cache_dir = label_settings.GetField('cache_dir')
       chrome_src = label_settings.GetField('chrome_src')
@@ -277,8 +288,8 @@
       # TODO(yunlian): We should consolidate code in machine_manager.py
       # to derermine whether we are running from within google or not
       if ('corp.google.com' in socket.gethostname() and
-          (not my_remote or my_remote == remote and
-           global_settings.GetField('board') != board)):
+          (not my_remote or
+           my_remote == remote and global_settings.GetField('board') != board)):
         my_remote = self.GetDefaultRemotes(board)
       if global_settings.GetField('same_machine') and len(my_remote) > 1:
         raise RuntimeError('Only one remote is allowed when same_machine '
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 02bfd0a..44090e5 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -175,9 +175,9 @@
     test_flag.SetTestMode(True)
     label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
     exp = ef.GetExperiment(mock_experiment_file, '', '')
-    self.assertEqual(
-        exp.remote,
-        ['chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'])
+    self.assertEqual(exp.remote, [
+        'chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'
+    ])
 
     # Third test: Automatic fixing of bad logging_level param:
     global_settings.SetField('logging_level', 'really loud!')
@@ -213,9 +213,9 @@
     self.assertEqual(len(exp.labels), 2)
     self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
     self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
-    self.assertEqual(
-        exp.remote,
-        ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
+    self.assertEqual(exp.remote, [
+        'fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'
+    ])
 
   def test_get_default_remotes(self):
     board_list = [
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 016e9d8..57eb52d 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -114,8 +114,8 @@
         elif ExperimentFile._OPEN_SETTINGS_RE.match(line):
           new_settings = self._ParseSettings(reader)
           if new_settings.name in settings_names:
-            raise SyntaxError("Duplicate settings name: '%s'." %
-                              new_settings.name)
+            raise SyntaxError(
+                "Duplicate settings name: '%s'." % new_settings.name)
           settings_names[new_settings.name] = True
           self.all_settings.append(new_settings)
         elif ExperimentFile._FIELD_VALUE_RE.match(line):
@@ -160,11 +160,8 @@
               autotest_path = ''
               if autotest_field.assigned:
                 autotest_path = autotest_field.GetString()
-              image_path, autotest_path = settings.GetXbuddyPath(value,
-                                                                 autotest_path,
-                                                                 board,
-                                                                 chromeos_root,
-                                                                 'quiet')
+              image_path, autotest_path = settings.GetXbuddyPath(
+                  value, autotest_path, board, chromeos_root, 'quiet')
               res += '\t#actual_image: %s\n' % image_path
               if not autotest_field.assigned:
                 res += '\t#actual_autotest_path: %s\n' % autotest_path
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index ed1f176..d4a0210 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -3,7 +3,6 @@
 # Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """The unittest of experiment_file."""
 from __future__ import print_function
 import StringIO
@@ -87,6 +86,7 @@
 
 class ExperimentFileTest(unittest.TestCase):
   """The main class for Experiment File test."""
+
   def testLoadExperimentFile1(self):
     input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
     experiment_file = ExperimentFile(input_file)
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index b30c8bd..b583743 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -26,14 +26,15 @@
 from results_report import JSONResultsReport
 from schedv2 import Schedv2
 
+
 def _WriteJSONReportToFile(experiment, results_dir, json_report):
   """Writes a JSON report to a file in results_dir."""
   has_llvm = any('llvm' in l.compiler for l in experiment.labels)
   compiler_string = 'llvm' if has_llvm else 'gcc'
   board = experiment.labels[0].board
-  filename = 'report_%s_%s_%s.%s.json' % (
-      board, json_report.date, json_report.time.replace(':', '.'),
-      compiler_string)
+  filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date,
+                                          json_report.time.replace(':', '.'),
+                                          compiler_string)
   fullname = os.path.join(results_dir, filename)
   report_text = json_report.GetReport()
   with open(fullname, 'w') as out_file:
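
For reference, the filename scheme assembled above can be illustrated with a small standalone snippet; the metadata values are hypothetical, and only the format string is taken from the function:

# Hypothetical report metadata; only the format string mirrors
# _WriteJSONReportToFile above.
board = 'lumpy'
date = '2017_01_02'
time_str = '10:30:00'.replace(':', '.')
compiler_string = 'llvm'
print('report_%s_%s_%s.%s.json' % (board, date, time_str, compiler_string))
# -> report_lumpy_2017_01_02_10.30.00.llvm.json
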
@@ -151,9 +152,10 @@
       cache.Init(br.label.chromeos_image, br.label.chromeos_root,
                  br.benchmark.test_name, br.iteration, br.test_args,
                  br.profiler_args, br.machine_manager, br.machine,
-                 br.label.board, br.cache_conditions, br._logger, br.log_level,
-                 br.label, br.share_cache, br.benchmark.suite,
-                 br.benchmark.show_all_results, br.benchmark.run_local)
+                 br.label.board, br.cache_conditions,
+                 br.logger(), br.log_level, br.label, br.share_cache,
+                 br.benchmark.suite, br.benchmark.show_all_results,
+                 br.benchmark.run_local)
       cache_dir = cache.GetCacheDirForWrite()
       if os.path.exists(cache_dir):
         self.l.LogOutput('Removing cache dir: %s' % cache_dir)
@@ -229,18 +231,19 @@
     subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
 
     text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
-    text_report += ('\nResults are stored in %s.\n' %
-                    experiment.results_directory)
+    text_report += (
+        '\nResults are stored in %s.\n' % experiment.results_directory)
     text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
     html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
     attachment = EmailSender.Attachment('report.html', html_report)
     email_to = experiment.email_to or []
     email_to.append(getpass.getuser())
-    EmailSender().SendEmail(email_to,
-                            subject,
-                            text_report,
-                            attachments=[attachment],
-                            msg_type='html')
+    EmailSender().SendEmail(
+        email_to,
+        subject,
+        text_report,
+        attachments=[attachment],
+        msg_type='html')
 
   def _StoreResults(self, experiment):
     if self._terminated:
@@ -256,8 +259,8 @@
     results_table_path = os.path.join(results_directory, 'results.html')
     report = HTMLResultsReport.FromExperiment(experiment).GetReport()
     if self.json_report:
-      json_report = JSONResultsReport.FromExperiment(experiment,
-                                                     json_args={'indent': 2})
+      json_report = JSONResultsReport.FromExperiment(
+          experiment, json_args={'indent': 2})
       _WriteJSONReportToFile(experiment, results_directory, json_report)
 
     FileUtils().WriteFile(results_table_path, report)
@@ -265,8 +268,8 @@
     self.l.LogOutput('Storing email message body in %s.' % results_directory)
     msg_file_path = os.path.join(results_directory, 'msg_body.html')
     text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
-    text_report += ('\nResults are stored in %s.\n' %
-                    experiment.results_directory)
+    text_report += (
+        '\nResults are stored in %s.\n' % experiment.results_directory)
     msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
     FileUtils().WriteFile(msg_file_path, msg_body)
 
@@ -296,8 +299,8 @@
     super(MockExperimentRunner, self).__init__(experiment, json_report)
 
   def _Run(self, experiment):
-    self.l.LogOutput("Would run the following experiment: '%s'." %
-                     experiment.name)
+    self.l.LogOutput(
+        "Would run the following experiment: '%s'." % experiment.name)
 
   def _PrintTable(self, experiment):
     self.l.LogOutput('Would print the experiment table.')
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 38ac387..4809894 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -106,9 +106,8 @@
   def make_fake_experiment(self):
     test_flag.SetTestMode(True)
     experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
-    experiment = ExperimentFactory().GetExperiment(experiment_file,
-                                                   working_directory='',
-                                                   log_dir='')
+    experiment = ExperimentFactory().GetExperiment(
+        experiment_file, working_directory='', log_dir='')
     return experiment
 
   @mock.patch.object(machine_manager.MachineManager, 'AddMachine')
@@ -120,20 +119,22 @@
     self.exp = self.make_fake_experiment()
 
   def test_init(self):
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     self.assertFalse(er._terminated)
     self.assertEqual(er.STATUS_TIME_DELAY, 10)
 
     self.exp.log_level = 'verbose'
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     self.assertEqual(er.STATUS_TIME_DELAY, 30)
 
   @mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
@@ -164,11 +165,12 @@
 
     # Test 1: log_level == "quiet"
     self.exp.log_level = 'quiet'
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     er.STATUS_TIME_DELAY = 2
     mock_status_string.return_value = 'Fake status string'
     er._Run(self.exp)
@@ -180,9 +182,10 @@
     self.assertEqual(self.mock_logger.dot_count, 2)
     self.assertEqual(mock_progress_string.call_count, 0)
     self.assertEqual(mock_status_string.call_count, 2)
-    self.assertEqual(self.mock_logger.output_msgs,
-                     ['==============================', 'Fake status string',
-                      '=============================='])
+    self.assertEqual(self.mock_logger.output_msgs, [
+        '==============================', 'Fake status string',
+        '=============================='
+    ])
     self.assertEqual(len(self.mock_logger.error_msgs), 0)
 
     # Test 2: log_level == "average"
@@ -190,11 +193,12 @@
     reset()
     self.exp.log_level = 'average'
     mock_status_string.call_count = 0
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     er.STATUS_TIME_DELAY = 2
     mock_status_string.return_value = 'Fake status string'
     er._Run(self.exp)
@@ -206,9 +210,10 @@
     self.assertEqual(self.mock_logger.dot_count, 2)
     self.assertEqual(mock_progress_string.call_count, 0)
     self.assertEqual(mock_status_string.call_count, 2)
-    self.assertEqual(self.mock_logger.output_msgs,
-                     ['==============================', 'Fake status string',
-                      '=============================='])
+    self.assertEqual(self.mock_logger.output_msgs, [
+        '==============================', 'Fake status string',
+        '=============================='
+    ])
     self.assertEqual(len(self.mock_logger.error_msgs), 0)
 
     # Test 3: log_level == "verbose"
@@ -216,11 +221,12 @@
     reset()
     self.exp.log_level = 'verbose'
     mock_status_string.call_count = 0
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     er.STATUS_TIME_DELAY = 2
     mock_status_string.return_value = 'Fake status string'
     mock_progress_string.return_value = 'Fake progress string'
@@ -233,22 +239,24 @@
     self.assertEqual(self.mock_logger.dot_count, 0)
     self.assertEqual(mock_progress_string.call_count, 2)
     self.assertEqual(mock_status_string.call_count, 2)
-    self.assertEqual(self.mock_logger.output_msgs,
-                     ['==============================', 'Fake progress string',
-                      'Fake status string', '==============================',
-                      '==============================', 'Fake progress string',
-                      'Fake status string', '=============================='])
+    self.assertEqual(self.mock_logger.output_msgs, [
+        '==============================', 'Fake progress string',
+        'Fake status string', '==============================',
+        '==============================', 'Fake progress string',
+        'Fake status string', '=============================='
+    ])
     self.assertEqual(len(self.mock_logger.error_msgs), 0)
 
   @mock.patch.object(TextResultsReport, 'GetReport')
   def test_print_table(self, mock_report):
     self.mock_logger.Reset()
     mock_report.return_value = 'This is a fake experiment report.'
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     er._PrintTable(self.exp)
     self.assertEqual(mock_report.call_count, 1)
     self.assertEqual(self.mock_logger.output_msgs,
@@ -269,11 +277,12 @@
     self.mock_logger.Reset()
     config.AddConfig('no_email', True)
     self.exp.email_to = ['jane.doe@google.com']
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
     # Test 1. Config:no_email; exp.email_to set ==> no email sent
     er._Email(self.exp)
     self.assertEqual(mock_getuser.call_count, 0)
@@ -295,8 +304,8 @@
     self.assertEqual(mock_html_report.call_count, 1)
     self.assertEqual(len(mock_emailer.call_args), 2)
     self.assertEqual(mock_emailer.call_args[0],
-                     (['jane.doe@google.com', 'john.smith@google.com'],
-                      ': image1 vs. image2',
+                     (['jane.doe@google.com',
+                       'john.smith@google.com'], ': image1 vs. image2',
                       "<pre style='font-size: 13px'>This is a fake text "
                       'report.\nResults are stored in _results.\n</pre>'))
     self.assertTrue(type(mock_emailer.call_args[1]) is dict)
@@ -325,8 +334,10 @@
     self.assertEqual(mock_html_report.call_count, 1)
     self.assertEqual(len(mock_emailer.call_args), 2)
     self.assertEqual(mock_emailer.call_args[0],
-                     (['jane.doe@google.com', 'john.smith@google.com',
-                       'john.smith@google.com'], ': image1 vs. image2',
+                     ([
+                         'jane.doe@google.com', 'john.smith@google.com',
+                         'john.smith@google.com'
+                     ], ': image1 vs. image2',
                       "<pre style='font-size: 13px'>This is a fake text "
                       'report.\nResults are stored in _results.\n</pre>'))
     self.assertTrue(type(mock_emailer.call_args[1]) is dict)
@@ -393,15 +404,16 @@
     self.mock_logger.Reset()
     self.exp.results_directory = '/usr/local/crosperf-results'
     bench_run = self.exp.benchmark_runs[5]
-    bench_path = '/usr/local/crosperf-results/' + filter(str.isalnum,
-                                                         bench_run.name)
+    bench_path = '/usr/local/crosperf-results/' + filter(
+        str.isalnum, bench_run.name)
     self.assertEqual(len(self.exp.benchmark_runs), 6)
 
-    er = experiment_runner.ExperimentRunner(self.exp,
-                                            json_report=False,
-                                            using_schedv2=False,
-                                            log=self.mock_logger,
-                                            cmd_exec=self.mock_cmd_exec)
+    er = experiment_runner.ExperimentRunner(
+        self.exp,
+        json_report=False,
+        using_schedv2=False,
+        log=self.mock_logger,
+        cmd_exec=self.mock_cmd_exec)
 
     # Test 1. Make sure nothing is done if _terminated is true.
     er._terminated = True
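
As an aside, the bench_path computed above leans on Python 2 semantics, where filter() applied to a str returns a str; a hedged illustration with a made-up benchmark-run name:

# Python 2 only: filter() over a str returns a str, so this strips every
# character that is not alphanumeric from the (hypothetical) run name.
name = 'page_cycler.typical_25: lumpy (1)'
print(filter(str.isalnum, name))  # -> pagecyclertypical25lumpy1
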
@@ -438,12 +450,12 @@
     self.assertEqual(mock_rmdir.call_count, 1)
     mock_rmdir.called_with('/usr/local/crosperf-results')
     self.assertEqual(self.mock_logger.LogOutputCount, 4)
-    self.assertEqual(
-        self.mock_logger.output_msgs,
-        ['Storing experiment file in /usr/local/crosperf-results.',
-         'Storing results report in /usr/local/crosperf-results.',
-         'Storing email message body in /usr/local/crosperf-results.',
-         'Storing results of each benchmark run.'])
+    self.assertEqual(self.mock_logger.output_msgs, [
+        'Storing experiment file in /usr/local/crosperf-results.',
+        'Storing results report in /usr/local/crosperf-results.',
+        'Storing email message body in /usr/local/crosperf-results.',
+        'Storing results of each benchmark run.'
+    ])
 
 
 if __name__ == '__main__':
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 627db99..c661043 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -80,8 +80,8 @@
     strings.append('Current time: %s Elapsed: %s ETA: %s' %
                    (datetime.datetime.now(),
                     datetime.timedelta(seconds=int(elapsed_time)), eta))
-    strings.append(self._GetProgressBar(self.experiment.num_complete,
-                                        self.num_total))
+    strings.append(
+        self._GetProgressBar(self.experiment.num_complete, self.num_total))
     return '\n'.join(strings)
 
   def GetStatusString(self):
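
A tiny standalone illustration of the elapsed-time formatting used in the status line above (the number of seconds is made up):

import datetime

elapsed_time = 4265.7  # hypothetical seconds since the experiment started
print(datetime.timedelta(seconds=int(elapsed_time)))  # -> 1:11:05
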
@@ -107,8 +107,8 @@
           self.experiment.machine_manager.AsString())
     elif self.experiment.schedv2():
       # In schedv2 mode, we always print out thread status.
-      thread_status = thread_status_format.format(self.experiment.schedv2(
-      ).threads_status_as_string())
+      thread_status = thread_status_format.format(
+          self.experiment.schedv2().threads_status_as_string())
 
     result = '{}{}'.format(thread_status, '\n'.join(status_strings))
 
diff --git a/crosperf/field.py b/crosperf/field.py
index bc92e2c..6821d4d 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -68,8 +68,8 @@
       return True
     elif value.lower() == 'false':
       return False
-    raise TypeError("Invalid value for '%s'. Must be true or false." %
-                    self.name)
+    raise TypeError(
+        "Invalid value for '%s'. Must be true or false." % self.name)
 
 
 class IntegerField(Field):
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 9f2a713..0e74327 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python2
 #
 # Copyright 2014 Google Inc. All Rights Reserved.
-
 """The unittest of flags."""
 
 from __future__ import print_function
@@ -12,6 +11,7 @@
 
 class FlagTestCase(unittest.TestCase):
   """The unittest class."""
+
   def test_test_flag(self):
     # Verify that test_flag.is_test exists, that it is a list,
     # and that it contains 1 element.
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index e0add99..fd7a2cf 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -3,7 +3,6 @@
 # Copyright 2016 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """Given a specially-formatted JSON object, generates results report(s).
 
 The JSON object should look like:
@@ -62,10 +61,12 @@
 
 def CountBenchmarks(benchmark_runs):
   """Counts the number of iterations for each benchmark in benchmark_runs."""
+
   # Example input for benchmark_runs:
   # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
   def _MaxLen(results):
     return 0 if not results else max(len(r) for r in results)
+
   return [(name, _MaxLen(results))
           for name, results in benchmark_runs.iteritems()]
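
To make the example in the comment concrete, here is a self-contained sketch of the same counting rule (hypothetical run names; the real code uses iteritems on Python 2):

def count_benchmarks(benchmark_runs):
  # Each benchmark maps to the longest iteration list among its runs.
  def max_len(results):
    return 0 if not results else max(len(r) for r in results)
  return [(name, max_len(results))
          for name, results in benchmark_runs.items()]

print(count_benchmarks({'bench': [['run1', 'run2', 'run3'],
                                  ['run1', 'run2', 'run3', 'run4']]}))
# -> [('bench', 4)]
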
 
@@ -121,8 +122,8 @@
           len(retained_keys) != len(removable_keys)
 
   if actually_updated and complain_on_update:
-    print("Warning: Some benchmark keyvals have been truncated.",
-          file=sys.stderr)
+    print(
+        'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr)
   return results
 
 
@@ -144,7 +145,7 @@
 def _PositiveInt(s):
   i = int(s)
   if i < 0:
-    raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i, ))
+    raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,))
   return i
 
 
@@ -182,13 +183,13 @@
   """
   if output_prefix == '-':
     if verbose:
-      print('Writing %s report to stdout' % (extension, ), file=sys.stderr)
+      print('Writing %s report to stdout' % (extension,), file=sys.stderr)
     sys.stdout.write(get_contents())
     return
 
   file_name = '%s.%s' % (output_prefix, extension)
   if not overwrite and os.path.exists(file_name):
-    raise IOError('Refusing to write %s -- it already exists' % (file_name, ))
+    raise IOError('Refusing to write %s -- it already exists' % (file_name,))
 
   with open(file_name, 'w') as out_file:
     if verbose:
@@ -200,7 +201,7 @@
   """Runs `actions`, returning True if all succeeded."""
   failed = False
 
-  report_ctor = None # Make the linter happy
+  report_ctor = None  # Make the linter happy
   for report_ctor, extension in actions:
     try:
       get_contents = lambda: report_ctor(benchmark_results).GetReport()
@@ -225,27 +226,49 @@
 def _ParseArgs(argv):
   parser = argparse.ArgumentParser(description='Turns JSON into results '
                                    'report(s).')
-  parser.add_argument('-v', '--verbose', action='store_true',
-                      help='Be a tiny bit more verbose.')
-  parser.add_argument('-f', '--force', action='store_true',
-                      help='Overwrite existing results files.')
-  parser.add_argument('-o', '--output', default='report', type=str,
-                      help='Prefix of the output filename (default: report). '
-                      '- means stdout.')
-  parser.add_argument('-i', '--input', required=True, type=str,
-                      help='Where to read the JSON from. - means stdin.')
-  parser.add_argument('-l', '--statistic-limit', default=0, type=_PositiveInt,
-                      help='The maximum number of benchmark statistics to '
-                      'display from a single run. 0 implies unlimited.')
-  parser.add_argument('--json', action='store_true',
-                      help='Output a JSON report.')
-  parser.add_argument('--text', action='store_true',
-                      help='Output a text report.')
-  parser.add_argument('--email', action='store_true',
-                      help='Output a text report suitable for email.')
-  parser.add_argument('--html', action='store_true',
-                      help='Output an HTML report (this is the default if no '
-                      'other output format is specified).')
+  parser.add_argument(
+      '-v',
+      '--verbose',
+      action='store_true',
+      help='Be a tiny bit more verbose.')
+  parser.add_argument(
+      '-f',
+      '--force',
+      action='store_true',
+      help='Overwrite existing results files.')
+  parser.add_argument(
+      '-o',
+      '--output',
+      default='report',
+      type=str,
+      help='Prefix of the output filename (default: report). '
+      '- means stdout.')
+  parser.add_argument(
+      '-i',
+      '--input',
+      required=True,
+      type=str,
+      help='Where to read the JSON from. - means stdin.')
+  parser.add_argument(
+      '-l',
+      '--statistic-limit',
+      default=0,
+      type=_PositiveInt,
+      help='The maximum number of benchmark statistics to '
+      'display from a single run. 0 implies unlimited.')
+  parser.add_argument(
+      '--json', action='store_true', help='Output a JSON report.')
+  parser.add_argument(
+      '--text', action='store_true', help='Output a text report.')
+  parser.add_argument(
+      '--email',
+      action='store_true',
+      help='Output a text report suitable for email.')
+  parser.add_argument(
+      '--html',
+      action='store_true',
+      help='Output an HTML report (this is the default if no '
+      'other output format is specified).')
   return parser.parse_args(argv)
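
As a usage illustration of the flag set defined above, a minimal argparse sketch covering a subset of the options (the argument values are hypothetical):

import argparse

parser = argparse.ArgumentParser(description='Turns JSON into results '
                                 'report(s).')
parser.add_argument('-i', '--input', required=True, type=str,
                    help='Where to read the JSON from. - means stdin.')
parser.add_argument('-o', '--output', default='report', type=str,
                    help='Prefix of the output filename. - means stdout.')
parser.add_argument('--html', action='store_true',
                    help='Output an HTML report.')

args = parser.parse_args(['-i', 'results.json', '--html'])
print('%s %s %s' % (args.input, args.output, args.html))
# -> results.json report True
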
 
 
@@ -263,13 +286,13 @@
   benches = CountBenchmarks(results)
   # In crosperf, a label is essentially a platform+configuration. So, a name of
   # a label and a name of a platform are equivalent for our purposes.
-  bench_results = BenchmarkResults(label_names=platform_names,
-                                   benchmark_names_and_iterations=benches,
-                                   run_keyvals=results,
-                                   read_perf_report=_NoPerfReport)
+  bench_results = BenchmarkResults(
+      label_names=platform_names,
+      benchmark_names_and_iterations=benches,
+      run_keyvals=results,
+      read_perf_report=_NoPerfReport)
   actions = _AccumulateActions(args)
-  ok = RunActions(actions, bench_results, args.output, args.force,
-                  args.verbose)
+  ok = RunActions(actions, bench_results, args.output, args.force, args.verbose)
   return 0 if ok else 1
 
 
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index a5d0063..bbb0c0a 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -19,8 +19,10 @@
 import generate_report
 import results_report
 
+
 class _ContextualStringIO(StringIO):
   """StringIO that can be used in `with` statements."""
+
   def __init__(self, *args):
     StringIO.__init__(self, *args)
 
@@ -33,6 +35,7 @@
 
 class GenerateReportTests(unittest.TestCase):
   """Tests for generate_report.py."""
+
   def testCountBenchmarks(self):
     runs = {
         'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
@@ -45,16 +48,33 @@
 
   def testCutResultsInPlace(self):
     bench_data = {
-        'foo': [[{'a': 1, 'b': 2, 'c': 3}, {'a': 3, 'b': 2.5, 'c': 1}]],
-        'bar': [[{'d': 11, 'e': 12, 'f': 13}]],
-        'baz': [[{'g': 12, 'h': 13}]],
-        'qux': [[{'i': 11}]],
+        'foo': [[{
+            'a': 1,
+            'b': 2,
+            'c': 3
+        }, {
+            'a': 3,
+            'b': 2.5,
+            'c': 1
+        }]],
+        'bar': [[{
+            'd': 11,
+            'e': 12,
+            'f': 13
+        }]],
+        'baz': [[{
+            'g': 12,
+            'h': 13
+        }]],
+        'qux': [[{
+            'i': 11
+        }]],
     }
     original_bench_data = copy.deepcopy(bench_data)
 
     max_keys = 2
-    results = generate_report.CutResultsInPlace(bench_data, max_keys=max_keys,
-                                                complain_on_update=False)
+    results = generate_report.CutResultsInPlace(
+        bench_data, max_keys=max_keys, complain_on_update=False)
     # Cuts should be in-place.
     self.assertIs(results, bench_data)
     self.assertItemsEqual(original_bench_data.keys(), bench_data.keys())
@@ -68,15 +88,21 @@
           # sub_keyvals must be a subset of original_keyvals
           self.assertDictContainsSubset(sub_keyvals, original_keyvals)
 
-
   def testCutResultsInPlaceLeavesRetval(self):
     bench_data = {
-        'foo': [[{'retval': 0, 'a': 1}]],
-        'bar': [[{'retval': 1}]],
-        'baz': [[{'RETVAL': 1}]],
+        'foo': [[{
+            'retval': 0,
+            'a': 1
+        }]],
+        'bar': [[{
+            'retval': 1
+        }]],
+        'baz': [[{
+            'RETVAL': 1
+        }]],
     }
-    results = generate_report.CutResultsInPlace(bench_data, max_keys=0,
-                                                complain_on_update=False)
+    results = generate_report.CutResultsInPlace(
+        bench_data, max_keys=0, complain_on_update=False)
     # Just reach into results, assuming it otherwise outputs things
     # sanely. If it doesn't, testCutResultsInPlace should give an indication as
     # to what, exactly, is broken.
@@ -121,12 +147,12 @@
   # We only mock print_exc so we don't have exception info printed to stdout.
   @mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo'))
   @mock.patch('traceback.print_exc')
-  def testRunActionsRunsAllActionsRegardlessOfExceptions(self, mock_print_exc,
-                                                         mock_write_file):
+  def testRunActionsRunsAllActionsRegardlessOfExceptions(
+      self, mock_print_exc, mock_write_file):
     actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')]
     output_prefix = '-'
-    ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
-                                    verbose=False)
+    ok = generate_report.RunActions(
+        actions, {}, output_prefix, overwrite=False, verbose=False)
     self.assertFalse(ok)
     self.assertEqual(mock_write_file.call_count, len(actions))
     self.assertEqual(mock_print_exc.call_count, len(actions))
@@ -135,8 +161,8 @@
   def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
     actions = [(None, 'json'), (None, 'html'), (None, 'text')]
     output_prefix = '-'
-    ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
-                                    verbose=False)
+    ok = generate_report.RunActions(
+        actions, {}, output_prefix, overwrite=False, verbose=False)
     self.assertEqual(mock_write_file.call_count, len(actions))
     self.assertTrue(ok)
 
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index e330084..f5862e4 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -25,8 +25,8 @@
     def Checksum(self):
       with self._lock:
         if not self._checksum:
-          logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
-                                       self.label.name)
+          logger.GetLogger().LogOutput(
+              "Acquiring checksum for '%s'." % self.label.name)
           self._checksum = None
           if self.label.image_type != 'local':
             raise RuntimeError('Called Checksum on non-local image!')
@@ -48,8 +48,8 @@
   def __new__(cls, *args, **kwargs):
     with cls._lock:
       if not cls._instance:
-        cls._instance = super(ImageChecksummer, cls).__new__(cls, *args,
-                                                             **kwargs)
+        cls._instance = super(ImageChecksummer, cls).__new__(
+            cls, *args, **kwargs)
       return cls._instance
 
   def Checksum(self, label, log_level):
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index 3cc464b..2ad750d 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,10 +1,9 @@
-
 # Copyright 2015 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """MachineImageManager allocates images to duts."""
 
+
 class MachineImageManager(object):
   """Management of allocating images to duts.
 
@@ -132,8 +131,7 @@
     * Special / common case to handle separately
 
       If we have only 1 dut or only 1 label, that's simple enough.
-
-    """
+  """
 
   def __init__(self, labels, duts):
     self.labels_ = labels
@@ -158,13 +156,13 @@
   def compute_initial_allocation(self):
     """Compute the initial label-dut allocation.
 
-        This method finds the most efficient way that every label gets imaged at
-        least once.
+    This method finds the most efficient way that every label gets imaged at
+    least once.
 
-        Returns:
-          False, only if not all labels could be imaged to a certain machine,
-          otherwise True.
-        """
+    Returns:
+      False, only if not all labels could be imaged to a certain machine,
+      otherwise True.
+    """
 
     if self.n_duts_ == 1:
       for i, v in self.matrix_vertical_generator(0):
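
The docstring above frames the allocation as a label-by-dut matrix; the following is a deliberately naive, hedged sketch of that idea, not the crosperf algorithm: 'X' marks dut/label pairs that cannot be used, and every label (row) needs at least one 'Y' (an initial imaging) on an allowed dut.

def naive_initial_allocation(matrix):
  # Greedily give every label (row) a 'Y' on the first allowed dut (column).
  # The real MachineImageManager is smarter about spreading reimages out.
  for row in matrix:
    for j, cell in enumerate(row):
      if cell == ' ':
        row[j] = 'Y'
        break
    else:
      return False  # some label has no dut it can be imaged on
  return True

matrix = [[' ', 'X', ' '],
          ['X', ' ', ' '],
          [' ', 'X', 'X']]
print(naive_initial_allocation(matrix))  # -> True
print(matrix)  # -> [['Y', 'X', ' '], ['X', 'Y', ' '], ['Y', 'X', 'X']]
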
@@ -196,15 +194,15 @@
   def allocate(self, dut, schedv2=None):
     """Allocate a label for dut.
 
-        Args:
-          dut: the dut that asks for a new image.
-          schedv2: the scheduling instance, we need the benchmark run
-                   information with schedv2 for a better allocation.
+    Args:
+      dut: the dut that asks for a new image.
+      schedv2: the scheduling instance, we need the benchmark run
+               information with schedv2 for a better allocation.
 
-        Returns:
-          a label to image onto the dut or None if no more available images for
-          the dut.
-        """
+    Returns:
+      a label to image onto the dut or None if no more available images for
+      the dut.
+    """
     j = self.dut_name_ordinal_[dut.name]
     # 'can_' prefix means candidate label's.
     can_reimage_number = 999
@@ -270,16 +268,16 @@
   def matrix_vertical_generator(self, col):
     """Iterate matrix vertically at column 'col'.
 
-        Yield row number i and value at matrix_[i][col].
-        """
+    Yield row number i and value at matrix_[i][col].
+    """
     for i, _ in enumerate(self.labels_):
       yield i, self.matrix_[i][col]
 
   def matrix_horizontal_generator(self, row):
     """Iterate matrix horizontally at row 'row'.
 
-        Yield col number j and value at matrix_[row][j].
-        """
+    Yield col number j and value at matrix_[row][j].
+    """
     for j, _ in enumerate(self.duts_):
       yield j, self.matrix_[row][j]
 
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index fe41dc0..02afaa0 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python2
 
 # Copyright 2015 Google Inc. All Rights Reserved.
-
 """Unit tests for the MachineImageManager class."""
 
 from __future__ import print_function
@@ -23,14 +22,14 @@
     """Provide hash function for label.
 
        This is required because Label objects are used as dict keys.
-       """
+    """
     return hash(self.name)
 
   def __eq__(self, other):
     """Provide eq function for label.
 
        This is required because Label objects are used as dict keys.
-       """
+    """
     return isinstance(other, MockLabel) and other.name == self.name
 
 
@@ -52,6 +51,7 @@
     return duts
 
   def print_matrix(self, matrix):
+    # pylint: disable=expression-not-assigned
     for r in matrix:
       for v in r:
         print('{} '.format('.' if v == ' ' else v)),
@@ -97,53 +97,63 @@
     self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
 
   def test_case1(self):
-    labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
-              MockLabel('l3', ['m1'])]
+    labels = [
+        MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+            'l3', ['m1'])
+    ]
     duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
     mim = MachineImageManager(labels, duts)
-    self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '],
+                                    [' ', 'X', 'X']])
     mim.compute_initial_allocation()
-    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+                                    ['Y', 'X', 'X']])
 
   def test_case2(self):
-    labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
-              MockLabel('l3', ['m1'])]
+    labels = [
+        MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+            'l3', ['m1'])
+    ]
     duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
     mim = MachineImageManager(labels, duts)
-    self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '],
+                                    [' ', 'X', 'X']])
     mim.compute_initial_allocation()
-    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+                                    ['Y', 'X', 'X']])
 
   def test_case3(self):
-    labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
-              MockLabel('l3', ['m1'])]
+    labels = [
+        MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+            'l3', ['m1'])
+    ]
     duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
     mim = MachineImageManager(labels, duts)
     mim.compute_initial_allocation()
-    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+                                    ['Y', 'X', 'X']])
 
   def test_case4(self):
-    labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
-              MockLabel('l3', ['m1'])]
+    labels = [
+        MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+            'l3', ['m1'])
+    ]
     duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
     mim = MachineImageManager(labels, duts)
     mim.compute_initial_allocation()
-    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+                                    ['Y', 'X', 'X']])
 
   def test_case5(self):
-    labels = [MockLabel('l1', ['m3']), MockLabel('l2', ['m3']),
-              MockLabel('l3', ['m1'])]
+    labels = [
+        MockLabel('l1', ['m3']), MockLabel('l2', ['m3']), MockLabel(
+            'l3', ['m1'])
+    ]
     duts = self.gen_duts_by_name('m1', 'm2', 'm3')
     mim = MachineImageManager(labels, duts)
     self.assertTrue(mim.compute_initial_allocation())
-    self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X',
-                                                                       'X']])
+    self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'],
+                                    ['Y', 'X', 'X']])
 
   def test_2x2_with_allocation(self):
     labels = [MockLabel('l0'), MockLabel('l1')]
@@ -193,29 +203,37 @@
     self.assertTrue(mim.compute_initial_allocation())
 
   def test_10x10_fully_random(self):
-    inp = ['X  .  .  .  X  X  .  X  X  .', 'X  X  .  X  .  X  .  X  X  .',
-           'X  X  X  .  .  X  .  X  .  X', 'X  .  X  X  .  .  X  X  .  X',
-           'X  X  X  X  .  .  .  X  .  .', 'X  X  .  X  .  X  .  .  X  .',
-           '.  X  .  X  .  X  X  X  .  .', '.  X  .  X  X  .  X  X  .  .',
-           'X  X  .  .  .  X  X  X  .  .', '.  X  X  X  X  .  .  .  .  X']
-    output = ['X  Y  .  .  X  X  .  X  X  .', 'X  X  Y  X  .  X  .  X  X  .',
-              'X  X  X  Y  .  X  .  X  .  X', 'X  .  X  X  Y  .  X  X  .  X',
-              'X  X  X  X  .  Y  .  X  .  .', 'X  X  .  X  .  X  Y  .  X  .',
-              'Y  X  .  X  .  X  X  X  .  .', '.  X  .  X  X  .  X  X  Y  .',
-              'X  X  .  .  .  X  X  X  .  Y', '.  X  X  X  X  .  .  Y  .  X']
+    inp = [
+        'X  .  .  .  X  X  .  X  X  .', 'X  X  .  X  .  X  .  X  X  .',
+        'X  X  X  .  .  X  .  X  .  X', 'X  .  X  X  .  .  X  X  .  X',
+        'X  X  X  X  .  .  .  X  .  .', 'X  X  .  X  .  X  .  .  X  .',
+        '.  X  .  X  .  X  X  X  .  .', '.  X  .  X  X  .  X  X  .  .',
+        'X  X  .  .  .  X  X  X  .  .', '.  X  X  X  X  .  .  .  .  X'
+    ]
+    output = [
+        'X  Y  .  .  X  X  .  X  X  .', 'X  X  Y  X  .  X  .  X  X  .',
+        'X  X  X  Y  .  X  .  X  .  X', 'X  .  X  X  Y  .  X  X  .  X',
+        'X  X  X  X  .  Y  .  X  .  .', 'X  X  .  X  .  X  Y  .  X  .',
+        'Y  X  .  X  .  X  X  X  .  .', '.  X  .  X  X  .  X  X  Y  .',
+        'X  X  .  .  .  X  X  X  .  Y', '.  X  X  X  X  .  .  Y  .  X'
+    ]
     self.pattern_based_test(inp, output)
 
   def test_10x10_fully_random2(self):
-    inp = ['X  .  X  .  .  X  .  X  X  X', 'X  X  X  X  X  X  .  .  X  .',
-           'X  .  X  X  X  X  X  .  .  X', 'X  X  X  .  X  .  X  X  .  .',
-           '.  X  .  X  .  X  X  X  X  X', 'X  X  X  X  X  X  X  .  .  X',
-           'X  .  X  X  X  X  X  .  .  X', 'X  X  X  .  X  X  X  X  .  .',
-           'X  X  X  .  .  .  X  X  X  X', '.  X  X  .  X  X  X  .  X  X']
-    output = ['X  .  X  Y  .  X  .  X  X  X', 'X  X  X  X  X  X  Y  .  X  .',
-              'X  Y  X  X  X  X  X  .  .  X', 'X  X  X  .  X  Y  X  X  .  .',
-              '.  X  Y  X  .  X  X  X  X  X', 'X  X  X  X  X  X  X  Y  .  X',
-              'X  .  X  X  X  X  X  .  Y  X', 'X  X  X  .  X  X  X  X  .  Y',
-              'X  X  X  .  Y  .  X  X  X  X', 'Y  X  X  .  X  X  X  .  X  X']
+    inp = [
+        'X  .  X  .  .  X  .  X  X  X', 'X  X  X  X  X  X  .  .  X  .',
+        'X  .  X  X  X  X  X  .  .  X', 'X  X  X  .  X  .  X  X  .  .',
+        '.  X  .  X  .  X  X  X  X  X', 'X  X  X  X  X  X  X  .  .  X',
+        'X  .  X  X  X  X  X  .  .  X', 'X  X  X  .  X  X  X  X  .  .',
+        'X  X  X  .  .  .  X  X  X  X', '.  X  X  .  X  X  X  .  X  X'
+    ]
+    output = [
+        'X  .  X  Y  .  X  .  X  X  X', 'X  X  X  X  X  X  Y  .  X  .',
+        'X  Y  X  X  X  X  X  .  .  X', 'X  X  X  .  X  Y  X  X  .  .',
+        '.  X  Y  X  .  X  X  X  X  X', 'X  X  X  X  X  X  X  Y  .  X',
+        'X  .  X  X  X  X  X  .  Y  X', 'X  X  X  .  X  X  X  X  .  Y',
+        'X  X  X  .  Y  .  X  X  X  X', 'Y  X  X  .  X  X  X  .  X  X'
+    ]
     self.pattern_based_test(inp, output)
 
   def test_3x4_with_allocation(self):
@@ -273,7 +291,7 @@
         l1   Y     X     X
 
         l2   Y     X     X
-        """
+    """
 
     inp = ['.  X  X', '.  X  X', '.  X  X']
     output = ['Y  X  X', 'Y  X  X', 'Y  X  X']
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 2fdf141..b9dda14 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -78,9 +78,8 @@
 
   def IsReachable(self):
     command = 'ls'
-    ret = self.ce.CrosRunCommand(command,
-                                 machine=self.name,
-                                 chromeos_root=self.chromeos_root)
+    ret = self.ce.CrosRunCommand(
+        command, machine=self.name, chromeos_root=self.chromeos_root)
     if ret:
       return False
     return True
@@ -121,9 +120,7 @@
     #meminfo, the assert does not catch it either
     command = 'cat /proc/meminfo'
     ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
-        command,
-        machine=self.name,
-        chromeos_root=self.chromeos_root)
+        command, machine=self.name, chromeos_root=self.chromeos_root)
     assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
     if ret == 0:
       self._ParseMemoryInfo()
@@ -131,9 +128,7 @@
   def _GetCPUInfo(self):
     command = 'cat /proc/cpuinfo'
     ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
-        command,
-        machine=self.name,
-        chromeos_root=self.chromeos_root)
+        command, machine=self.name, chromeos_root=self.chromeos_root)
     assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
 
   def _ComputeMachineChecksumString(self):
@@ -153,9 +148,7 @@
   def _GetMachineID(self):
     command = 'dump_vpd_log --full --stdout'
     _, if_out, _ = self.ce.CrosRunCommandWOutput(
-        command,
-        machine=self.name,
-        chromeos_root=self.chromeos_root)
+        command, machine=self.name, chromeos_root=self.chromeos_root)
     b = if_out.splitlines()
     a = [l for l in b if 'Product' in l]
     if len(a):
@@ -163,9 +156,7 @@
       return
     command = 'ifconfig'
     _, if_out, _ = self.ce.CrosRunCommandWOutput(
-        command,
-        machine=self.name,
-        chromeos_root=self.chromeos_root)
+        command, machine=self.name, chromeos_root=self.chromeos_root)
     b = if_out.splitlines()
     a = [l for l in b if 'HWaddr' in l]
     if len(a):
@@ -222,8 +213,8 @@
     self.logger = lgr or logger.GetLogger()
 
     if self.locks_dir and not os.path.isdir(self.locks_dir):
-      raise MissingLocksDirectory('Cannot access locks directory: %s' %
-                                  self.locks_dir)
+      raise MissingLocksDirectory(
+          'Cannot access locks directory: %s' % self.locks_dir)
 
     self._initialized_machines = []
     self.chromeos_root = chromeos_root
@@ -242,12 +233,10 @@
 
     cmd = '/opt/google/chrome/chrome --version'
     ret, version, _ = self.ce.CrosRunCommandWOutput(
-        cmd,
-        machine=machine.name,
-        chromeos_root=self.chromeos_root)
+        cmd, machine=machine.name, chromeos_root=self.chromeos_root)
     if ret != 0:
-      raise CrosCommandError("Couldn't get Chrome version from %s." %
-                             machine.name)
+      raise CrosCommandError(
+          "Couldn't get Chrome version from %s." % machine.name)
 
     if ret != 0:
       version = ''
@@ -261,11 +250,13 @@
     chromeos_root = label.chromeos_root
     if not chromeos_root:
       chromeos_root = self.chromeos_root
-    image_chromeos_args = [image_chromeos.__file__, '--no_lock',
-                           '--chromeos_root=%s' % chromeos_root,
-                           '--image=%s' % label.chromeos_image,
-                           '--image_args=%s' % label.image_args, '--remote=%s' %
-                           machine.name, '--logging_level=%s' % self.log_level]
+    image_chromeos_args = [
+        image_chromeos.__file__, '--no_lock',
+        '--chromeos_root=%s' % chromeos_root,
+        '--image=%s' % label.chromeos_image,
+        '--image_args=%s' % label.image_args, '--remote=%s' % machine.name,
+        '--logging_level=%s' % self.log_level
+    ]
     if label.board:
       image_chromeos_args.append('--board=%s' % label.board)
 
@@ -287,9 +278,8 @@
         cmd = 'reboot && exit'
         if self.log_level != 'verbose':
           self.logger.LogOutput('reboot & exit.')
-        self.ce.CrosRunCommand(cmd,
-                               machine=machine.name,
-                               chromeos_root=self.chromeos_root)
+        self.ce.CrosRunCommand(
+            cmd, machine=machine.name, chromeos_root=self.chromeos_root)
         time.sleep(60)
         if self.log_level != 'verbose':
           self.logger.LogOutput('Pushing image onto machine.')
@@ -349,8 +339,8 @@
       locked = True
       if self.locks_dir:
         locked = file_lock_machine.Machine(cros_machine.name,
-                                           self.locks_dir).Lock(True,
-                                                                sys.argv[0])
+                                           self.locks_dir).Lock(
+                                               True, sys.argv[0])
       if locked:
         self._machines.append(cros_machine)
         command = 'cat %s' % CHECKSUM_FILE
@@ -371,8 +361,8 @@
 
       if self.log_level != 'verbose':
         self.logger.LogOutput('Setting up remote access to %s' % machine_name)
-        self.logger.LogOutput('Checking machine characteristics for %s' %
-                              machine_name)
+        self.logger.LogOutput(
+            'Checking machine characteristics for %s' % machine_name)
       cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
       if cm.machine_checksum:
         self._all_machines.append(cm)
@@ -411,17 +401,19 @@
         self.acquire_timeout -= sleep_time
 
       if self.acquire_timeout < 0:
-        self.logger.LogFatal('Could not acquire any of the '
-                             "following machines: '%s'" %
-                             ', '.join(machine.name for machine in machines))
+        self.logger.LogFatal(
+            'Could not acquire any of the '
+            "following machines: '%s'" % ', '.join(machine.name
+                                                   for machine in machines))
 
 ###      for m in self._machines:
 ###        if (m.locked and time.time() - m.released_time < 10 and
 ###            m.checksum == image_checksum):
 ###          return None
-      unlocked_machines = [machine
-                           for machine in self.GetAvailableMachines(label)
-                           if not machine.locked]
+      unlocked_machines = [
+          machine for machine in self.GetAvailableMachines(label)
+          if not machine.locked
+      ]
       for m in unlocked_machines:
         if image_checksum and m.checksum == image_checksum:
           m.locked = True
@@ -651,8 +643,8 @@
   """Mock machine manager class."""
 
   def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
-    super(MockMachineManager, self).__init__(
-        chromeos_root, acquire_timeout, log_level, locks_dir)
+    super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout,
+                                             log_level, locks_dir)
 
   def _TryToLockMachine(self, cros_machine):
     self._machines.append(cros_machine)
@@ -663,8 +655,8 @@
       for m in self._all_machines:
         assert m.name != machine_name, 'Tried to double-add %s' % machine_name
       cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
-      assert cm.machine_checksum, ('Could not find checksum for machine %s' %
-                                   machine_name)
+      assert cm.machine_checksum, (
+          'Could not find checksum for machine %s' % machine_name)
       # In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
       # machine is unreachable, then its machine_checksum is None. Here we
       # cannot do this, because machine_checksum is always faked, so we directly
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 8652f17..b267d69 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -41,22 +41,21 @@
         assert m.name != machine_name, 'Tried to double-add %s' % machine_name
       cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
                                            'average')
-      assert cm.machine_checksum, ('Could not find checksum for machine %s' %
-                                   machine_name)
+      assert cm.machine_checksum, (
+          'Could not find checksum for machine %s' % machine_name)
       self._all_machines.append(cm)
 
 
 CHROMEOS_ROOT = '/tmp/chromeos-root'
 MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
-LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', 'autotest_dir',
-                              CHROMEOS_ROOT, 'lumpy',
-                              ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '',
-                              False, 'average,'
-                              'gcc', None)
+LABEL_LUMPY = label.MockLabel(
+    'lumpy', 'lumpy_chromeos_image', 'autotest_dir', CHROMEOS_ROOT, 'lumpy',
+    ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '', False, 'average,'
+    'gcc', None)
 LABEL_MIX = label.MockLabel('mix', 'chromeos_image', 'autotest_dir',
                             CHROMEOS_ROOT, 'mix',
-                            ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
-                            False, 'average', 'gcc', None)
+                            ['daisy1', 'daisy2', 'lumpy3',
+                             'lumpy4'], '', '', False, 'average', 'gcc', None)
 
 
 class MachineManagerTest(unittest.TestCase):
@@ -85,10 +84,9 @@
   def setUp(self, mock_isdir):
 
     mock_isdir.return_value = True
-    self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
-                                             'average', None,
-                                             self.mock_cmd_exec,
-                                             self.mock_logger)
+    self.mm = machine_manager.MachineManager(
+        '/usr/local/chromeos', 0, 'average', None, self.mock_cmd_exec,
+        self.mock_logger)
 
     self.mock_lumpy1.name = 'lumpy1'
     self.mock_lumpy2.name = 'lumpy2'
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 29e118e..04e6590 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -12,7 +12,6 @@
 import re
 import tempfile
 import json
-import sys
 
 from cros_utils import command_executer
 from cros_utils import misc
@@ -67,9 +66,8 @@
       if not os.path.isdir(dest_dir):
         command = 'mkdir -p %s' % dest_dir
         self.ce.RunCommand(command)
-      dest_file = os.path.join(dest_dir,
-                               ('%s.%s' % (os.path.basename(file_to_copy),
-                                           file_index)))
+      dest_file = os.path.join(
+          dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
       ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
       if ret:
         raise IOError('Could not copy results file: %s' % file_to_copy)
@@ -230,10 +228,10 @@
                                                        perf_data_file)
       perf_report_file = '%s.report' % perf_data_file
       if os.path.exists(perf_report_file):
-        raise RuntimeError('Perf report file already exists: %s' %
-                           perf_report_file)
-      chroot_perf_report_file = misc.GetInsideChrootPath(self.chromeos_root,
-                                                         perf_report_file)
+        raise RuntimeError(
+            'Perf report file already exists: %s' % perf_report_file)
+      chroot_perf_report_file = misc.GetInsideChrootPath(
+          self.chromeos_root, perf_report_file)
       perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
 
       perf_file = '/usr/sbin/perf'
@@ -366,8 +364,8 @@
       self.retval = pickle.load(f)
 
     # Untar the tarball to a temporary directory
-    self.temp_dir = tempfile.mkdtemp(
-        dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
+    self.temp_dir = tempfile.mkdtemp(dir=os.path.join(self.chromeos_root,
+                                                      'chroot', 'tmp'))
 
     command = ('cd %s && tar xf %s' %
                (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
@@ -439,8 +437,8 @@
     if ret:
       command = 'rm -rf {0}'.format(temp_dir)
       self.ce.RunCommand(command)
-      raise RuntimeError('Could not move dir %s to dir %s' %
-                         (temp_dir, cache_dir))
+      raise RuntimeError('Could not move dir %s to dir %s' % (temp_dir,
+                                                              cache_dir))
 
   @classmethod
   def CreateFromRun(cls,
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 9e97c9b..a2480d2 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -268,10 +268,10 @@
     self.result.CopyResultsTo('/tmp/results/')
     self.assertEqual(mockCopyFilesTo.call_count, 2)
     self.assertEqual(len(mockCopyFilesTo.call_args_list), 2)
-    self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
-                     ('/tmp/results/', perf_data_files))
-    self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
-                     ('/tmp/results/', perf_report_files))
+    self.assertEqual(mockCopyFilesTo.call_args_list[0][0], ('/tmp/results/',
+                                                            perf_data_files))
+    self.assertEqual(mockCopyFilesTo.call_args_list[1][0], ('/tmp/results/',
+                                                            perf_report_files))
 
   def test_get_new_keyvals(self):
     kv_dict = {}
@@ -436,8 +436,10 @@
     self.assertEqual(mock_runcmd.call_args_list[0][0],
                      ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
     self.assertEqual(mock_chrootruncmd.call_count, 1)
-    self.assertEqual(mock_chrootruncmd.call_args_list[0][0], (
-        '/tmp', ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
+    self.assertEqual(
+        mock_chrootruncmd.call_args_list[0][0],
+        ('/tmp',
+         ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
     self.assertEqual(mock_getpath.call_count, 1)
     self.assertEqual(mock_mkdtemp.call_count, 1)
     self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
@@ -899,9 +901,8 @@
     self.mock_label = MockLabel('mock_label', 'chromeos_image', 'autotest_dir',
                                 '/tmp', 'lumpy', 'remote', 'image_args',
                                 'cache_dir', 'average', 'gcc', None)
-    self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
-                                                        '/tmp/chromeos',
-                                                        'average')
+    self.mock_machine = machine_manager.MockCrosMachine(
+        'falco.cros', '/tmp/chromeos', 'average')
 
   def test_populate_from_run(self):
 
@@ -979,12 +980,10 @@
     def FakeGetMachines(label):
       if label:
         pass
-      m1 = machine_manager.MockCrosMachine('lumpy1.cros',
-                                           self.results_cache.chromeos_root,
-                                           'average')
-      m2 = machine_manager.MockCrosMachine('lumpy2.cros',
-                                           self.results_cache.chromeos_root,
-                                           'average')
+      m1 = machine_manager.MockCrosMachine(
+          'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+      m2 = machine_manager.MockCrosMachine(
+          'lumpy2.cros', self.results_cache.chromeos_root, 'average')
       return [m1, m2]
 
     mock_checksum.return_value = 'FakeImageChecksumabc123'
@@ -1026,12 +1025,10 @@
     def FakeGetMachines(label):
       if label:
         pass
-      m1 = machine_manager.MockCrosMachine('lumpy1.cros',
-                                           self.results_cache.chromeos_root,
-                                           'average')
-      m2 = machine_manager.MockCrosMachine('lumpy2.cros',
-                                           self.results_cache.chromeos_root,
-                                           'average')
+      m1 = machine_manager.MockCrosMachine(
+          'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+      m2 = machine_manager.MockCrosMachine(
+          'lumpy2.cros', self.results_cache.chromeos_root, 'average')
       return [m1, m2]
 
     mock_checksum.return_value = 'FakeImageChecksumabc123'
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 097c744..bda0cc1 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -47,6 +47,17 @@
   return [func() for _ in xrange(times)]
 
 
+def _DictWithReturnValues(retval, pass_fail):
+  """Create a new dictionary pre-populated with success/fail values."""
+  new_dict = {}
+  # Note: 0 is a valid retval; test to make sure it's not None.
+  if retval is not None:
+    new_dict['retval'] = retval
+  if pass_fail:
+    new_dict[''] = pass_fail
+  return new_dict
+
+
 def _GetNonDupLabel(max_dup, runs):
   """Create new list for the runs of the same label.
 
@@ -61,15 +72,19 @@
   """
   new_runs = []
   for run in runs:
+    run_retval = run.get('retval', None)
+    run_pass_fail = run.get('', None)
     new_run = {}
-    added_runs = _Repeat(dict, max_dup)
+    # pylint: disable=cell-var-from-loop
+    added_runs = _Repeat(
+        lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup)
     for key, value in run.iteritems():
       match = _DUP_KEY_REGEX.match(key)
       if not match:
         new_run[key] = value
       else:
         new_key, index_str = match.groups()
-        added_runs[int(index_str)-1][new_key] = str(value)
+        added_runs[int(index_str) - 1][new_key] = str(value)
     new_runs.append(new_run)
     new_runs += added_runs
   return new_runs
@@ -135,6 +150,7 @@
     result[name] = _Repeat(make_dicts, len(labels))
   return result
 
+
 def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
   """Create a dict from benchmark_runs.
 
@@ -180,10 +196,12 @@
     # (This can happen if, for example, the test has been disabled.)
     if len(cur_dict) == 1 and cur_dict['retval'] == 0:
       cur_dict['retval'] = 1
+      benchmark_run.result.keyvals['retval'] = 1
       # TODO: This output should be sent via logger.
-      print("WARNING: Test '%s' appears to have succeeded but returned"
-            ' no results.' % benchmark.name,
-            file=sys.stderr)
+      print(
+          "WARNING: Test '%s' appears to have succeeded but returned"
+          ' no results.' % benchmark.name,
+          file=sys.stderr)
     if json_report and benchmark_run.machine:
       cur_dict['machine'] = benchmark_run.machine.name
       cur_dict['machine_checksum'] = benchmark_run.machine.checksum
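
A short, hedged sketch of what the new _DictWithReturnValues helper (added in the hunk above) does for the duplicate-run dicts built in _GetNonDupLabel: each split-out run now starts out carrying the parent run's retval and pass/fail marker (the marker lives under the empty-string key). The sample input values below are made up for illustration only.

    from __future__ import print_function

    def _DictWithReturnValues(retval, pass_fail):
      """Create a new dictionary pre-populated with success/fail values."""
      new_dict = {}
      # Note: 0 is a valid retval; test to make sure it's not None.
      if retval is not None:
        new_dict['retval'] = retval
      if pass_fail:
        new_dict[''] = pass_fail
      return new_dict

    # Hypothetical parent run; max_dup of 2 duplicates assumed.
    run = {'retval': 0, '': 'PASS', 'milliseconds_1': '1', 'milliseconds_2': '8'}
    added_runs = [_DictWithReturnValues(run.get('retval'), run.get(''))
                  for _ in range(2)]
    print(added_runs)  # two dicts, each seeded with {'retval': 0, '': 'PASS'}
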
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index ccf0297..e765737 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -3,12 +3,11 @@
 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """Testing of ResultsOrganizer
 
    We create some labels, benchmark_runs and then create a ResultsOrganizer,
    after that, we compare the result of ResultOrganizer.
-   """
+"""
 
 from __future__ import print_function
 
@@ -20,55 +19,115 @@
 
 import mock_instance
 
-result = {'benchmark1': [[{'': 'PASS',
-                           'bool': 'True',
-                           'milliseconds_1': '1',
-                           'milliseconds_2': '8',
-                           'milliseconds_3': '9.2',
-                           'ms_1': '2.1',
-                           'total': '5'}, {'test': '2'}, {'test': '4'},
-                          {'': 'PASS',
-                           'bool': 'FALSE',
-                           'milliseconds_1': '3',
-                           'milliseconds_2': '5',
-                           'ms_1': '2.2',
-                           'total': '6'}, {'test': '3'}, {'test': '4'}],
-                         [{'': 'PASS',
-                           'bool': 'FALSE',
-                           'milliseconds_4': '30',
-                           'milliseconds_5': '50',
-                           'ms_1': '2.23',
-                           'total': '6'}, {'test': '5'}, {'test': '4'},
-                          {'': 'PASS',
-                           'bool': 'FALSE',
-                           'milliseconds_1': '3',
-                           'milliseconds_6': '7',
-                           'ms_1': '2.3',
-                           'total': '7'}, {'test': '2'}, {'test': '6'}]],
-          'benchmark2': [[{'': 'PASS',
-                           'bool': 'TRUE',
-                           'milliseconds_1': '3',
-                           'milliseconds_8': '6',
-                           'ms_1': '2.3',
-                           'total': '7'}, {'test': '2'}, {'test': '6'},
-                          {'': 'PASS',
-                           'bool': 'TRUE',
-                           'milliseconds_1': '3',
-                           'milliseconds_8': '6',
-                           'ms_1': '2.2',
-                           'total': '7'}, {'test': '2'}, {'test': '2'}],
-                         [{'': 'PASS',
-                           'bool': 'TRUE',
-                           'milliseconds_1': '3',
-                           'milliseconds_8': '6',
-                           'ms_1': '2',
-                           'total': '7'}, {'test': '2'}, {'test': '4'},
-                          {'': 'PASS',
-                           'bool': 'TRUE',
-                           'milliseconds_1': '3',
-                           'milliseconds_8': '6',
-                           'ms_1': '1',
-                           'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+result = {
+    'benchmark1': [[{
+        '': 'PASS',
+        'bool': 'True',
+        'milliseconds_1': '1',
+        'milliseconds_2': '8',
+        'milliseconds_3': '9.2',
+        'ms_1': '2.1',
+        'total': '5'
+    }, {
+        '': 'PASS',
+        'test': '2'
+    }, {
+        '': 'PASS',
+        'test': '4'
+    }, {
+        '': 'PASS',
+        'bool': 'FALSE',
+        'milliseconds_1': '3',
+        'milliseconds_2': '5',
+        'ms_1': '2.2',
+        'total': '6'
+    }, {
+        '': 'PASS',
+        'test': '3'
+    }, {
+        '': 'PASS',
+        'test': '4'
+    }], [{
+        '': 'PASS',
+        'bool': 'FALSE',
+        'milliseconds_4': '30',
+        'milliseconds_5': '50',
+        'ms_1': '2.23',
+        'total': '6'
+    }, {
+        '': 'PASS',
+        'test': '5'
+    }, {
+        '': 'PASS',
+        'test': '4'
+    }, {
+        '': 'PASS',
+        'bool': 'FALSE',
+        'milliseconds_1': '3',
+        'milliseconds_6': '7',
+        'ms_1': '2.3',
+        'total': '7'
+    }, {
+        '': 'PASS',
+        'test': '2'
+    }, {
+        '': 'PASS',
+        'test': '6'
+    }]],
+    'benchmark2': [[{
+        '': 'PASS',
+        'bool': 'TRUE',
+        'milliseconds_1': '3',
+        'milliseconds_8': '6',
+        'ms_1': '2.3',
+        'total': '7'
+    }, {
+        '': 'PASS',
+        'test': '2'
+    }, {
+        '': 'PASS',
+        'test': '6'
+    }, {
+        '': 'PASS',
+        'bool': 'TRUE',
+        'milliseconds_1': '3',
+        'milliseconds_8': '6',
+        'ms_1': '2.2',
+        'total': '7'
+    }, {
+        '': 'PASS',
+        'test': '2'
+    }, {
+        '': 'PASS',
+        'test': '2'
+    }], [{
+        '': 'PASS',
+        'bool': 'TRUE',
+        'milliseconds_1': '3',
+        'milliseconds_8': '6',
+        'ms_1': '2',
+        'total': '7'
+    }, {
+        '': 'PASS',
+        'test': '2'
+    }, {
+        '': 'PASS',
+        'test': '4'
+    }, {
+        '': 'PASS',
+        'bool': 'TRUE',
+        'milliseconds_1': '3',
+        'milliseconds_8': '6',
+        'ms_1': '1',
+        'total': '7'
+    }, {
+        '': 'PASS',
+        'test': '1'
+    }, {
+        '': 'PASS',
+        'test': '6'
+    }]]
+}
 
 
 class ResultOrganizerTest(unittest.TestCase):
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 7a46534..fac044f 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -92,9 +92,13 @@
 
 def _FilterPerfReport(event_threshold, report):
   """Filters out entries with `< event_threshold` percent in a perf report."""
+
   def filter_dict(m):
-    return {fn_name: pct for fn_name, pct in m.iteritems()
-            if pct >= event_threshold}
+    return {
+        fn_name: pct
+        for fn_name, pct in m.iteritems() if pct >= event_threshold
+    }
+
   return {event: filter_dict(m) for event, m in report.iteritems()}
 
 
@@ -109,8 +113,11 @@
   percentage of time spent in function_name).
   """
 
-  def __init__(self, benchmark_names_and_iterations, label_names,
-               read_perf_report, event_threshold=None):
+  def __init__(self,
+               benchmark_names_and_iterations,
+               label_names,
+               read_perf_report,
+               event_threshold=None):
     """Constructor.
 
     read_perf_report is a function that takes a label name, benchmark name, and
@@ -143,8 +150,8 @@
 
 
 def _GetResultsTableHeader(ben_name, iterations):
-  benchmark_info = ('Benchmark:  {0};  Iterations: {1}'
-                    .format(ben_name, iterations))
+  benchmark_info = ('Benchmark:  {0};  Iterations: {1}'.format(
+      ben_name, iterations))
   cell = Cell()
   cell.string_value = benchmark_info
   cell.header = True
@@ -157,8 +164,9 @@
     if column.result.__class__.__name__ != 'RawResult':
       new_column.append(column)
     else:
-      new_column.extend(Column(LiteralResult(i), Format(), str(i + 1))
-                        for i in xrange(iteration))
+      new_column.extend(
+          Column(LiteralResult(i), Format(), str(i + 1))
+          for i in xrange(iteration))
   return new_column
 
 
@@ -199,9 +207,10 @@
     benchmark_data = p_table.perf_data[benchmark]
     table = []
     for event in benchmark_data:
-      tg = TableGenerator(benchmark_data[event],
-                          benchmark_results.label_names,
-                          sort=TableGenerator.SORT_BY_VALUES_DESC)
+      tg = TableGenerator(
+          benchmark_data[event],
+          benchmark_results.label_names,
+          sort=TableGenerator.SORT_BY_VALUES_DESC)
       table = tg.GetTable(ResultsReport.PERF_ROWS)
       parsed_columns = _ParseColumn(columns, iterations)
       tf = TableFormatter(table, parsed_columns)
@@ -227,22 +236,24 @@
     return get_tables(self.benchmark_results, columns, table_type)
 
   def GetFullTables(self, perf=False):
-    columns = [Column(RawResult(), Format()),
-               Column(MinResult(), Format()),
-               Column(MaxResult(), Format()),
-               Column(AmeanResult(), Format()),
-               Column(StdResult(), Format(), 'StdDev'),
-               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
-               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
-               Column(PValueResult(), PValueFormat(), 'p-value')]
+    columns = [
+        Column(RawResult(), Format()), Column(MinResult(), Format()), Column(
+            MaxResult(), Format()), Column(AmeanResult(), Format()), Column(
+                StdResult(), Format(), 'StdDev'),
+        Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
+            GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
+                PValueResult(), PValueFormat(), 'p-value')
+    ]
     return self._GetTablesWithColumns(columns, 'full', perf)
 
   def GetSummaryTables(self, perf=False):
-    columns = [Column(AmeanResult(), Format()),
-               Column(StdResult(), Format(), 'StdDev'),
-               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
-               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
-               Column(PValueResult(), PValueFormat(), 'p-value')]
+    columns = [
+        Column(AmeanResult(), Format()), Column(StdResult(), Format(),
+                                                'StdDev'),
+        Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
+            GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
+                PValueResult(), PValueFormat(), 'p-value')
+    ]
     return self._GetTablesWithColumns(columns, 'summary', perf)
 
 
@@ -299,12 +310,16 @@
   def GetStatusTable(self):
     """Generate the status table by the tabulator."""
     table = [['', '']]
-    columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
-               Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]
+    columns = [
+        Column(LiteralResult(iteration=0), Format(), 'Status'), Column(
+            LiteralResult(iteration=1), Format(), 'Failing Reason')
+    ]
 
     for benchmark_run in self.experiment.benchmark_runs:
-      status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
-                                     benchmark_run.failure_reason]]
+      status = [
+          benchmark_run.name,
+          [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason]
+      ]
       table.append(status)
     cell_table = TableFormatter(table, columns).GetCellTable('status')
     return [cell_table]
@@ -316,7 +331,7 @@
 
     sections = []
     if experiment is not None:
-      title_contents = "Results report for '%s'" % (experiment.name, )
+      title_contents = "Results report for '%s'" % (experiment.name,)
     else:
       title_contents = 'Results report'
     sections.append(self._MakeTitle(title_contents))
@@ -348,8 +363,10 @@
     # Fun fact: label_names is actually *entirely* useless as a param, since we
     # never add headers. We still need to pass it anyway.
     table = TableGenerator(runs, label_names).GetTable()
-    columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
-               Column(MaxResult(), Format())]
+    columns = [
+        Column(AmeanResult(), Format()), Column(MinResult(), Format()), Column(
+            MaxResult(), Format())
+    ]
     tf = TableFormatter(table, columns)
     data_table = tf.GetCellTable('full', headers=False)
 
@@ -365,10 +382,10 @@
       chart.AddSeries('Max', 'line', 'black')
       cur_index = 1
       for label in label_names:
-        chart.AddRow([label,
-                      cur_row_data[cur_index].value,
-                      cur_row_data[cur_index + 1].value,
-                      cur_row_data[cur_index + 2].value])
+        chart.AddRow([
+            label, cur_row_data[cur_index].value,
+            cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value
+        ])
         if isinstance(cur_row_data[cur_index].value, str):
           chart = None
           break
@@ -387,8 +404,8 @@
 
   @staticmethod
   def FromExperiment(experiment):
-    return HTMLResultsReport(BenchmarkResults.FromExperiment(experiment),
-                             experiment=experiment)
+    return HTMLResultsReport(
+        BenchmarkResults.FromExperiment(experiment), experiment=experiment)
 
   def GetReport(self):
     label_names = self.benchmark_results.label_names
@@ -404,13 +421,14 @@
     if self.experiment is not None:
       experiment_file = self.experiment.experiment_file
     # Use kwargs for sanity, and so that testing is a bit easier.
-    return templates.GenerateHTMLPage(perf_table=perf_table,
-                                      chart_js=chart_javascript,
-                                      summary_table=summary_table,
-                                      print_table=_PrintTable,
-                                      chart_divs=chart_divs,
-                                      full_table=full_table,
-                                      experiment_file=experiment_file)
+    return templates.GenerateHTMLPage(
+        perf_table=perf_table,
+        chart_js=chart_javascript,
+        summary_table=summary_table,
+        print_table=_PrintTable,
+        chart_divs=chart_divs,
+        full_table=full_table,
+        experiment_file=experiment_file)
 
 
 def ParseStandardPerfReport(report_data):
@@ -446,12 +464,12 @@
   #
   # Note that we're looking at stripped lines, so there is no space at the
   # start.
-  perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
-                          r'\s*\d+' # samples count (ignored)
-                          r'\s*\S+' # command (ignored)
-                          r'\s*\S+' # shared_object (ignored)
-                          r'\s*\[.\]' # location (ignored)
-                          r'\s*(\S.+)' # function
+  perf_regex = re.compile(r'^(\d+(?:.\d*)?)%'  # N.NN%
+                          r'\s*\d+'  # samples count (ignored)
+                          r'\s*\S+'  # command (ignored)
+                          r'\s*\S+'  # shared_object (ignored)
+                          r'\s*\[.\]'  # location (ignored)
+                          r'\s*(\S.+)'  # function
                          )
 
   stripped_lines = (l.strip() for l in report_data)
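
For reference, a hedged illustration of the line shape the regex above is meant to match; the sample perf-report line is invented for this sketch, not taken from a real report.

    from __future__ import print_function
    import re

    perf_regex = re.compile(r'^(\d+(?:.\d*)?)%'  # N.NN%
                            r'\s*\d+'  # samples count (ignored)
                            r'\s*\S+'  # command (ignored)
                            r'\s*\S+'  # shared_object (ignored)
                            r'\s*\[.\]'  # location (ignored)
                            r'\s*(\S.+)'  # function
                           )

    line = '12.34%  5678  chrome  libfoo.so  [.] SomeFunctionName'.strip()
    match = perf_regex.match(line)
    print(match.groups())  # ('12.34', 'SomeFunctionName')
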
@@ -511,17 +529,23 @@
 # Split out so that testing (specifically: mocking) is easier
 def _ExperimentToKeyvals(experiment, for_json_report):
   """Converts an experiment to keyvals."""
-  return OrganizeResults(experiment.benchmark_runs, experiment.labels,
-                         json_report=for_json_report)
+  return OrganizeResults(
+      experiment.benchmark_runs, experiment.labels, json_report=for_json_report)
 
 
 class BenchmarkResults(object):
   """The minimum set of fields that any ResultsReport will take."""
-  def __init__(self, label_names, benchmark_names_and_iterations, run_keyvals,
+
+  def __init__(self,
+               label_names,
+               benchmark_names_and_iterations,
+               run_keyvals,
                read_perf_report=None):
     if read_perf_report is None:
+
       def _NoPerfReport(*_args, **_kwargs):
         return {}
+
       read_perf_report = _NoPerfReport
 
     self.label_names = label_names
@@ -557,10 +581,15 @@
   """If l is a list, extracts the first element of l. Otherwise, returns l."""
   return l[0] if isinstance(l, list) else l
 
+
 class JSONResultsReport(ResultsReport):
   """Class that generates JSON reports for experiments."""
 
-  def __init__(self, benchmark_results, date=None, time=None, experiment=None,
+  def __init__(self,
+               benchmark_results,
+               date=None,
+               time=None,
+               experiment=None,
                json_args=None):
     """Construct a JSONResultsReport.
 
@@ -589,8 +618,8 @@
 
   @staticmethod
   def FromExperiment(experiment, date=None, time=None, json_args=None):
-    benchmark_results = BenchmarkResults.FromExperiment(experiment,
-                                                        for_json_report=True)
+    benchmark_results = BenchmarkResults.FromExperiment(
+        experiment, for_json_report=True)
     return JSONResultsReport(benchmark_results, date, time, experiment,
                              json_args)
 
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
index 827649f..15ce582 100644
--- a/crosperf/results_report_templates.py
+++ b/crosperf/results_report_templates.py
@@ -14,6 +14,7 @@
   <a href="javascript:switchTab('$table_name', 'tsv')">TSV</a>
 </div>""")
 
+
 def _GetTabMenuHTML(table_name):
   # N.B. cgi.escape does some very basic HTML escaping. Nothing more.
   escaped = cgi.escape(table_name, quote=True)
@@ -28,10 +29,11 @@
 </div>
 """
 
+
 def _GetExperimentFileHTML(experiment_file_text):
   if not experiment_file_text:
     return ''
-  return _ExperimentFileHTML % (cgi.escape(experiment_file_text), )
+  return _ExperimentFileHTML % (cgi.escape(experiment_file_text),)
 
 
 _ResultsSectionHTML = Template("""
@@ -46,16 +48,17 @@
 </div>
 """)
 
+
 def _GetResultsSectionHTML(print_table, table_name, data):
   first_word = table_name.strip().split()[0]
   short_name = first_word.lower()
-  return _ResultsSectionHTML.substitute(sect_name=table_name,
-                                        html_table=print_table(data, 'HTML'),
-                                        text_table=print_table(data, 'PLAIN'),
-                                        tsv_table=print_table(data, 'TSV'),
-                                        tab_menu=_GetTabMenuHTML(short_name),
-                                        short_name=short_name)
-
+  return _ResultsSectionHTML.substitute(
+      sect_name=table_name,
+      html_table=print_table(data, 'HTML'),
+      text_table=print_table(data, 'PLAIN'),
+      tsv_table=print_table(data, 'TSV'),
+      tab_menu=_GetTabMenuHTML(short_name),
+      short_name=short_name)
 
 
 _MainHTML = Template("""
@@ -166,6 +169,7 @@
 </html>
 """)
 
+
 # It's a bit ugly that we take some HTML things, and some non-HTML things, but I
 # need to balance prettiness with time spent making things pretty.
 def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
@@ -189,8 +193,11 @@
   full_table_html = _GetResultsSectionHTML(print_table, 'Full Table',
                                            full_table)
   experiment_file_html = _GetExperimentFileHTML(experiment_file)
-  return _MainHTML.substitute(perf_init=perf_init, chart_js=chart_js,
-                              summary_table=summary_table_html,
-                              perf_html=perf_html, chart_divs=chart_divs,
-                              full_table=full_table_html,
-                              experiment_file=experiment_file_html)
+  return _MainHTML.substitute(
+      perf_init=perf_init,
+      chart_js=chart_js,
+      summary_table=summary_table_html,
+      perf_html=perf_html,
+      chart_divs=chart_divs,
+      full_table=full_table_html,
+      experiment_file=experiment_file_html)
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index ed5c74f..2a23aa7 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -3,7 +3,6 @@
 # Copyright 2016 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 """Unittest for the results reporter."""
 
 from __future__ import division
@@ -50,10 +49,11 @@
         '/chromiumos_test_image.bin'
     buildbot_img = buildbot_case.split('/chroot/tmp')[1]
 
-    self.assertEqual(ParseChromeosImage(buildbot_case),
-                     ('R02-1.0', buildbot_img))
-    self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
-                     ('', os.path.dirname(buildbot_img)))
+    self.assertEqual(
+        ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img))
+    self.assertEqual(
+        ParseChromeosImage(os.path.dirname(buildbot_case)),
+        ('', os.path.dirname(buildbot_img)))
 
     # Ensure we don't act completely insanely given a few mildly insane paths.
     fun_case = '/chromiumos_test_image.bin'
@@ -66,6 +66,8 @@
 # There are many ways for this to be done better, but the linter complains
 # about all of them (that I can think of, at least).
 _fake_path_number = [0]
+
+
 def FakePath(ext):
   """Makes a unique path that shouldn't exist on the host system.
 
@@ -73,7 +75,7 @@
   error message, it may be easier to track it to its source.
   """
   _fake_path_number[0] += 1
-  prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
+  prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],)
   return os.path.join(prefix, ext)
 
 
@@ -121,14 +123,15 @@
   share_cache = ''
   locks_dir = ''
   log = logger.GetLogger()
-  machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
-                                       log_level, locks_dir)
+  machine_manager = MockMachineManager(
+      FakePath('chromeos_root'), 0, log_level, locks_dir)
   machine_manager.AddMachine('testing_machine')
   machine = next(m for m in machine_manager.GetMachines()
                  if m.name == 'testing_machine')
   for label in experiment.labels:
+
     def MakeSuccessfulRun(n):
-      run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
+      run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
                              1 + n + num_runs, cache_conditions,
                              machine_manager, log, log_level, share_cache)
       mock_result = MockResult(log, label, log_level, machine)
@@ -136,8 +139,8 @@
       run.result = mock_result
       return run
 
-    experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
-                                     for n in xrange(how_many))
+    experiment.benchmark_runs.extend(
+        MakeSuccessfulRun(n) for n in xrange(how_many))
   return experiment
 
 
@@ -160,7 +163,6 @@
     self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
     return text_report
 
-
   def testOutput(self):
     email_report = self._checkReport(email=True)
     text_report = self._checkReport(email=False)
@@ -177,12 +179,10 @@
   things are displayed. It just cares that they're present.
   """
 
-  _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
-                                                      'perf_html',
-                                                      'chart_js',
-                                                      'charts',
-                                                      'full_table',
-                                                      'experiment_file'])
+  _TestOutput = collections.namedtuple('TestOutput', [
+      'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
+      'experiment_file'
+  ])
 
   @staticmethod
   def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
@@ -192,12 +192,13 @@
     summary_table = print_table(summary_table, 'HTML')
     perf_html = print_table(perf_table, 'HTML')
     full_table = print_table(full_table, 'HTML')
-    return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
-                                             perf_html=perf_html,
-                                             chart_js=chart_js,
-                                             charts=chart_divs,
-                                             full_table=full_table,
-                                             experiment_file=experiment_file)
+    return HTMLResultsReportTest._TestOutput(
+        summary_table=summary_table,
+        perf_html=perf_html,
+        chart_js=chart_js,
+        charts=chart_divs,
+        full_table=full_table,
+        experiment_file=experiment_file)
 
   def _GetOutput(self, experiment=None, benchmark_results=None):
     with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
@@ -222,8 +223,8 @@
   def testSuccessfulOutput(self):
     num_success = 2
     success_keyvals = {'retval': 0, 'a_float': 3.96}
-    output = self._GetOutput(_InjectSuccesses(MakeMockExperiment(), num_success,
-                                              success_keyvals))
+    output = self._GetOutput(
+        _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))
 
     self.assertNotIn('no result', output.summary_table)
     #self.assertIn(success_keyvals['machine'], output.summary_table)
@@ -321,8 +322,17 @@
     benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
                                       ('bench3', 1), ('bench4', 0)]
     benchmark_keyvals = {
-        'bench1': [[{'retval': 1, 'foo': 2.0}]],
-        'bench2': [[{'retval': 1, 'foo': 4.0}, {'retval': -1, 'bar': 999}]],
+        'bench1': [[{
+            'retval': 1,
+            'foo': 2.0
+        }]],
+        'bench2': [[{
+            'retval': 1,
+            'foo': 4.0
+        }, {
+            'retval': -1,
+            'bar': 999
+        }]],
         # lack of retval is considered a failure.
         'bench3': [[{}]],
         'bench4': [[]]
@@ -341,8 +351,8 @@
     benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
     bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                      benchmark_keyvals)
-    reporter = JSONResultsReport(bench_results,
-                                 json_args={'separators': separators})
+    reporter = JSONResultsReport(
+        bench_results, json_args={'separators': separators})
     result_str = reporter.GetReport()
     self.assertIn(separators[0], result_str)
     self.assertIn(separators[1], result_str)
@@ -351,8 +361,17 @@
     labels = ['label1']
     benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
     benchmark_keyvals = {
-        'bench1': [[{'retval': 0, 'foo': 2.0}]],
-        'bench2': [[{'retval': 0, 'foo': 4.0}, {'retval': 0, 'bar': 999}]]
+        'bench1': [[{
+            'retval': 0,
+            'foo': 2.0
+        }]],
+        'bench2': [[{
+            'retval': 0,
+            'foo': 4.0
+        }, {
+            'retval': 0,
+            'bar': 999
+        }]]
     }
     bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                      benchmark_keyvals)
@@ -374,6 +393,7 @@
 
 class PerfReportParserTest(unittest.TestCase):
   """Tests for the perf report parser in results_report."""
+
   @staticmethod
   def _ReadRealPerfReport():
     my_dir = os.path.dirname(os.path.realpath(__file__))
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 90fe83a..e661f30 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -3,7 +3,6 @@
 # found in the LICENSE file.
 """Module to optimize the scheduling of benchmark_run tasks."""
 
-
 from __future__ import print_function
 
 import sys
@@ -48,7 +47,7 @@
     """Do the "run-test->(optionally reimage)->run-test" chore.
 
         Note - 'br' below means 'benchmark_run'.
-        """
+    """
 
     # Firstly, handle benchmarkruns that have cache hit.
     br = self._sched.get_cached_benchmark_run()
@@ -93,12 +92,12 @@
   def _reimage(self, label):
     """Reimage image to label.
 
-        Args:
-          label: the label to remimage onto dut.
+    Args:
+      label: the label to reimage onto dut.
+      label: the label to reimage onto dut.
 
-        Returns:
-          0 if successful, otherwise 1.
-        """
+    Returns:
+      0 if successful, otherwise 1.
+    """
 
     # Termination could happen anywhere, check it.
     if self._terminated:
@@ -111,8 +110,7 @@
       # Note, only 1 reimage at any given time, this is guaranteed in
       # ImageMachine, so no sync needed below.
       retval = self._sched.get_experiment().machine_manager.ImageMachine(
-          self._dut,
-          label)
+          self._dut, label)
 
       if retval:
         return 1
@@ -126,7 +124,7 @@
     """Execute a single benchmark_run.
 
         Note - this function never throws exceptions.
-        """
+    """
 
     # Termination could happen anywhere, check it.
     if self._terminated:
@@ -152,7 +150,7 @@
 
         If such match is found, we just skip doing reimage and jump to execute
         some benchmark_runs.
-        """
+    """
 
     checksum_file = '/usr/local/osimage_checksum_file'
     try:
@@ -166,8 +164,8 @@
         checksum = checksum.strip()
         for l in self._sched.get_labels():
           if l.checksum == checksum:
-            self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
-                self._dut.name, l))
+            self._logger.LogOutput(
+                "Dut '{}' is pre-installed with '{}'".format(self._dut.name, l))
             self._dut.label = l
             return
     except RuntimeError:
@@ -196,7 +194,7 @@
 
     On creation, each instance of this class is given a br_list, which is a
     subset of experiment._benchmark_runs.
-    """
+  """
 
   def __init__(self, schedv2, br_list):
     super(BenchmarkRunCacheReader, self).__init__()
@@ -272,7 +270,7 @@
         We do this by firstly creating a few threads, and then assign each
         thread a segment of all brs. Each thread will check cache status for
         each br and put those with cache into '_cached_br_list'.
-        """
+    """
 
     self._cached_br_list = []
     n_benchmarkruns = len(self._experiment.benchmark_runs)
@@ -287,16 +285,16 @@
     # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
     n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
     self._logger.LogOutput(('Starting {} threads to read cache status for '
-                            '{} benchmark runs ...').format(n_threads,
-                                                            n_benchmarkruns))
+                            '{} benchmark runs ...').format(
+                                n_threads, n_benchmarkruns))
     benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
     benchmarkrun_segments = []
     for i in range(n_threads - 1):
       start = i * benchmarkruns_per_thread
       end = (i + 1) * benchmarkruns_per_thread
       benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
-    benchmarkrun_segments.append(self._experiment.benchmark_runs[
-        (n_threads - 1) * benchmarkruns_per_thread:])
+    benchmarkrun_segments.append(self._experiment.benchmark_runs[(
+        n_threads - 1) * benchmarkruns_per_thread:])
 
     # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
     assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
@@ -314,9 +312,8 @@
       x.join()
 
     # Summarize.
-    self._logger.LogOutput(
-        'Total {} cache hit out of {} benchmark_runs.'.format(
-            len(self._cached_br_list), n_benchmarkruns))
+    self._logger.LogOutput('Total {} cache hit out of {} benchmark_runs.'.
+                           format(len(self._cached_br_list), n_benchmarkruns))
 
   def get_cached_run_list(self):
     return self._cached_br_list
@@ -338,9 +335,9 @@
   def get_cached_benchmark_run(self):
     """Get a benchmark_run with 'cache hit'.
 
-        Returns:
-          The benchmark that has cache hit, if any. Otherwise none.
-        """
+    Returns:
+      The benchmark that has cache hit, if any. Otherwise none.
+    """
 
     with self.lock_on('_cached_br_list'):
       if self._cached_br_list:
@@ -350,14 +347,14 @@
   def get_benchmark_run(self, dut):
     """Get a benchmark_run (br) object for a certain dut.
 
-        Args:
-          dut: the dut for which a br is returned.
+    Args:
+      dut: the dut for which a br is returned.
 
-        Returns:
-          A br with its label matching that of the dut. If no such br could be
-          found, return None (this usually means a reimage is required for the
-          dut).
-        """
+    Returns:
+      A br with its label matching that of the dut. If no such br could be
+      found, return None (this usually means a reimage is required for the
+      dut).
+    """
 
     # If terminated, stop providing any br.
     if self._terminated:
@@ -384,12 +381,12 @@
         The dut_worker calling this method is responsible for reimage the dut to
         this label.
 
-        Args:
-          dut: the new label that is to be reimaged onto the dut.
+    Args:
+      dut: the new label that is to be reimaged onto the dut.
 
-        Returns:
-          The label or None.
-        """
+    Returns:
+      The label or None.
+    """
 
     if self._terminated:
       return None
@@ -399,9 +396,9 @@
   def dut_worker_finished(self, dut_worker):
     """Notify schedv2 that the dut_worker thread finished.
 
-       Args:
-         dut_worker: the thread that is about to end.
-       """
+    Args:
+      dut_worker: the thread that is about to end.
+    """
 
     self._logger.LogOutput('{} finished.'.format(dut_worker))
     with self._workers_lock:
@@ -418,7 +415,7 @@
     """Mark flag so we stop providing br/reimages.
 
         Also terminate each DutWorker, so they refuse to execute br or reimage.
-        """
+    """
 
     self._terminated = True
     for dut_worker in self._active_workers:
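
As a sanity check on the cache-reader segmentation arithmetic reformatted above, here is a hedged sketch using the 18-run case exercised in schedv2_unittest.py below; '//' is written out explicitly, which matches what '/' already does on ints under Python 2.

    from __future__ import print_function

    n_benchmarkruns = 18
    # (x + 3) / 4 mimics math.ceil(x / 4), clamped to [2, 20] threads.
    n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))  # -> 5
    benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) // n_threads  # -> 4
    segment_sizes = [benchmarkruns_per_thread] * (n_threads - 1)
    segment_sizes.append(n_benchmarkruns - sum(segment_sizes))  # remainder to last thread
    print(n_threads, segment_sizes)  # 5 [4, 4, 4, 4, 2]
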
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index be0fde4..250968d 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -72,11 +72,10 @@
     """Create fake experiment from string.
 
         Note - we mock out BenchmarkRun in this step.
-        """
+    """
     experiment_file = ExperimentFile(StringIO.StringIO(expstr))
-    experiment = ExperimentFactory().GetExperiment(experiment_file,
-                                                   working_directory='',
-                                                   log_dir='')
+    experiment = ExperimentFactory().GetExperiment(
+        experiment_file, working_directory='', log_dir='')
     return experiment
 
   def test_remote(self):
@@ -99,8 +98,8 @@
       return (cm.name != 'chromeos-daisy3.cros' and
               cm.name != 'chromeos-daisy5.cros')
 
-    with mock.patch('machine_manager.MockCrosMachine.IsReachable',
-                    new=MockIsReachable):
+    with mock.patch(
+        'machine_manager.MockCrosMachine.IsReachable', new=MockIsReachable):
       self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
       self.assertIn('chromeos-daisy1.cros', self.exp.remote)
       self.assertIn('chromeos-daisy2.cros', self.exp.remote)
@@ -119,8 +118,8 @@
   def test_BenchmarkRunCacheReader_1(self, reader):
     """Test benchmarkrun set is split into 5 segments."""
 
-    self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
-        kraken_iterations=9))
+    self.exp = self._make_fake_experiment(
+        EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9))
     my_schedv2 = Schedv2(self.exp)
     self.assertFalse(my_schedv2.is_complete())
     # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
@@ -141,8 +140,8 @@
   def test_BenchmarkRunCacheReader_2(self, reader):
     """Test benchmarkrun set is split into 4 segments."""
 
-    self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
-        kraken_iterations=8))
+    self.exp = self._make_fake_experiment(
+        EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8))
     my_schedv2 = Schedv2(self.exp)
     self.assertFalse(my_schedv2.is_complete())
     # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
@@ -156,8 +155,8 @@
   def test_BenchmarkRunCacheReader_3(self, reader):
     """Test benchmarkrun set is split into 2 segments."""
 
-    self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
-        kraken_iterations=3))
+    self.exp = self._make_fake_experiment(
+        EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3))
     my_schedv2 = Schedv2(self.exp)
     self.assertFalse(my_schedv2.is_complete())
     # We have 3 * 2 == 6 brs, we use 2 threads.
@@ -169,8 +168,8 @@
   def test_BenchmarkRunCacheReader_4(self, reader):
     """Test benchmarkrun set is not splitted."""
 
-    self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
-        kraken_iterations=1))
+    self.exp = self._make_fake_experiment(
+        EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1))
     my_schedv2 = Schedv2(self.exp)
     self.assertFalse(my_schedv2.is_complete())
     # We have 1 * 2 == 2 br, so only 1 instance.
@@ -183,18 +182,17 @@
     def MockReadCache(br):
       br.cache_hit = (br.label.name == 'image2')
 
-    with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
-                    new=MockReadCache):
+    with mock.patch(
+        'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
       # We have 2 * 30 brs, half of which are put into _cached_br_list.
-      self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
-          kraken_iterations=30))
+      self.exp = self._make_fake_experiment(
+          EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
       my_schedv2 = Schedv2(self.exp)
       self.assertEquals(len(my_schedv2.get_cached_run_list()), 30)
       # The non-cache-hit brs are put into Schedv2._label_brl_map.
       self.assertEquals(
           reduce(lambda a, x: a + len(x[1]),
-                 my_schedv2.get_label_map().iteritems(),
-                 0), 30)
+                 my_schedv2.get_label_map().iteritems(), 0), 30)
 
   def test_nocachehit(self):
     """Test no cache-hit."""
@@ -202,18 +200,17 @@
     def MockReadCache(br):
       br.cache_hit = False
 
-    with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
-                    new=MockReadCache):
+    with mock.patch(
+        'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
       # We have 2 * 30 brs, none of which are put into _cached_br_list.
-      self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
-          kraken_iterations=30))
+      self.exp = self._make_fake_experiment(
+          EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
       my_schedv2 = Schedv2(self.exp)
       self.assertEquals(len(my_schedv2.get_cached_run_list()), 0)
       # The non-cache-hit brs are put into Schedv2._label_brl_map.
       self.assertEquals(
           reduce(lambda a, x: a + len(x[1]),
-                 my_schedv2.get_label_map().iteritems(),
-                 0), 60)
+                 my_schedv2.get_label_map().iteritems(), 0), 60)
 
 
 if __name__ == '__main__':
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index e42d82a..efbb534 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -29,9 +29,11 @@
     self.AddField(
         IntegerField(
             'iterations',
-            default=1,
-            description='Number of iterations to run the '
-            'test.'))
+            required=False,
+            default=0,
+            description='Number of iterations to run the test. '
+            'If not set, will run each benchmark test the optimum number of '
+            'times to get a stable result.'))
     self.AddField(
         TextField(
             'suite', default='', description='The type of the benchmark.'))
@@ -68,8 +70,8 @@
             'autotest_path',
             required=False,
             description='Autotest directory path relative to chroot which '
-            'has autotest files for the image to run tests requiring autotest files'
-        ))
+            'has autotest files for the image to run tests requiring autotest '
+            'files.'))
     self.AddField(
         TextField(
             'chromeos_root',
@@ -186,9 +188,11 @@
     self.AddField(
         IntegerField(
             'iterations',
-            default=1,
-            description='Number of iterations to run all '
-            'tests.'))
+            required=False,
+            default=0,
+            description='Number of iterations to run all tests. '
+            'If not set, will run each benchmark test the optimum number of '
+            'times to get a stable result.'))
     self.AddField(
         TextField(
             'chromeos_root',
@@ -256,8 +260,7 @@
             'you want to use. It accepts multiple directories '
             'separated by a ",".'))
     self.AddField(
-        TextField(
-            'results_dir', default='', description='The results dir.'))
+        TextField('results_dir', default='', description='The results dir.'))
     self.AddField(
         TextField(
             'locks_dir',
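
The iterations change above flips the default from 1 to 0, where 0 now means "let crosperf pick enough runs for a stable result" rather than "run once". A minimal, hedged check of the new defaults, mirroring the updated assertions in settings_factory_unittest.py below (assumes crosperf/ is on sys.path):

    from __future__ import print_function
    from settings_factory import SettingsFactory

    bench_settings = SettingsFactory().GetSettings('benchmark', 'benchmark')
    global_settings = SettingsFactory().GetSettings('global', 'global')
    print(bench_settings.GetField('iterations'))   # 0 (was 1 before this change)
    print(global_settings.GetField('iterations'))  # 0 (was 1 before this change)
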
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 127bfd3..1ff6a13 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python2
 #
-# Copyright 2014 Google Inc. All Rights Reserved.
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 """Unittest for crosperf."""
 
 from __future__ import print_function
@@ -19,7 +21,7 @@
     self.assertEqual(len(res.fields), 6)
     self.assertEqual(res.GetField('test_name'), '')
     self.assertEqual(res.GetField('test_args'), '')
-    self.assertEqual(res.GetField('iterations'), 1)
+    self.assertEqual(res.GetField('iterations'), 0)
     self.assertEqual(res.GetField('suite'), '')
 
 
@@ -56,7 +58,7 @@
     self.assertEqual(res.GetField('rerun'), False)
     self.assertEqual(res.GetField('same_specs'), True)
     self.assertEqual(res.GetField('same_machine'), False)
-    self.assertEqual(res.GetField('iterations'), 1)
+    self.assertEqual(res.GetField('iterations'), 0)
     self.assertEqual(res.GetField('chromeos_root'), '')
     self.assertEqual(res.GetField('logging_level'), 'average')
     self.assertEqual(res.GetField('acquire_timeout'), 0)
@@ -77,18 +79,18 @@
     self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
                       'global', 'bad_type')
 
-    l_settings = settings_factory.SettingsFactory().GetSettings('label',
-                                                                'label')
+    l_settings = settings_factory.SettingsFactory().GetSettings(
+        'label', 'label')
     self.assertIsInstance(l_settings, settings_factory.LabelSettings)
     self.assertEqual(len(l_settings.fields), 9)
 
-    b_settings = settings_factory.SettingsFactory().GetSettings('benchmark',
-                                                                'benchmark')
+    b_settings = settings_factory.SettingsFactory().GetSettings(
+        'benchmark', 'benchmark')
     self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
     self.assertEqual(len(b_settings.fields), 6)
 
-    g_settings = settings_factory.SettingsFactory().GetSettings('global',
-                                                                'global')
+    g_settings = settings_factory.SettingsFactory().GetSettings(
+        'global', 'global')
     self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
     self.assertEqual(len(g_settings.fields), 25)
 
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index f1062f0..fea55c0 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -48,14 +48,12 @@
             'run the test.'))
     self.assertEqual(len(self.settings.fields), 1)
     # Adding the same field twice raises an exception.
-    self.assertRaises(
-        Exception,
-        self.settings.AddField, (IntegerField(
-            'iterations',
-            default=1,
-            required=False,
-            description='Number of iterations to run '
-            'the test.')))
+    self.assertRaises(Exception, self.settings.AddField, (IntegerField(
+        'iterations',
+        default=1,
+        required=False,
+        description='Number of iterations to run '
+        'the test.')))
     res = self.settings.fields['iterations']
     self.assertIsInstance(res, IntegerField)
     self.assertEqual(res.Get(), 1)
@@ -116,10 +114,10 @@
     self.assertEqual(res, 5)
 
   def test_inherit(self):
-    parent_settings = settings_factory.SettingsFactory().GetSettings('global',
-                                                                     'global')
-    label_settings = settings_factory.SettingsFactory().GetSettings('label',
-                                                                    'label')
+    parent_settings = settings_factory.SettingsFactory().GetSettings(
+        'global', 'global')
+    label_settings = settings_factory.SettingsFactory().GetSettings(
+        'label', 'label')
     self.assertEqual(parent_settings.GetField('chromeos_root'), '')
     self.assertEqual(label_settings.GetField('chromeos_root'), '')
     self.assertIsNone(label_settings.parent)
@@ -140,8 +138,8 @@
             'list of email addresses to send '
             'email to.'))
 
-    global_settings = settings_factory.SettingsFactory().GetSettings('global',
-                                                                     'global')
+    global_settings = settings_factory.SettingsFactory().GetSettings(
+        'global', 'global')
 
     global_settings.SetField('email', 'john.doe@google.com', append=True)
     global_settings.SetField('email', 'jane.smith@google.com', append=True)
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 678113a..bd27f28 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -78,8 +78,8 @@
                               (benchmark.name, i))
         break
       else:
-        self.logger.LogOutput('benchmark %s succeded on first try' %
-                              benchmark.name)
+        self.logger.LogOutput(
+            'benchmark %s succeeded on first try' % benchmark.name)
         break
     return ret_tup
 
@@ -88,32 +88,37 @@
     # pyformat: disable
     set_cpu_freq = (
         'set -e && '
+        # Disable Turbo in Intel pstate driver
+        'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
+        'echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; fi; '
+        # Set governor to performance for each cpu
         'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
         'cd $f; '
-        'val=0; '
-        'if [[ -e scaling_available_frequencies ]]; then '
-        # pylint: disable=line-too-long
-        '  val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
-        'else '
-        '  val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
-        'set -- $val; '
-        'highest=$1; '
-        'if [[ $# -gt 1 ]]; then '
-        '  case $highest in *1000) highest=$2;; esac; '
-        'fi ;'
-        'echo $highest > scaling_max_freq; '
-        'echo $highest > scaling_min_freq; '
         'echo performance > scaling_governor; '
+        # Uncomment rest of lines to enable setting frequency by crosperf
+        #'val=0; '
+        #'if [[ -e scaling_available_frequencies ]]; then '
+        # pylint: disable=line-too-long
+        #'  val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
+        #'else '
+        #'  val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
+        #'set -- $val; '
+        #'highest=$1; '
+        #'if [[ $# -gt 1 ]]; then '
+        #'  case $highest in *1000) highest=$2;; esac; '
+        #'fi ;'
+        #'echo $highest > scaling_max_freq; '
+        #'echo $highest > scaling_min_freq; '
         'done'
     )
     # pyformat: enable
     if self.log_level == 'average':
-      self.logger.LogOutput('Pinning governor execution frequencies for %s' %
-                            machine_name)
+      self.logger.LogOutput(
+          'Pinning governor execution frequencies for %s' % machine_name)
     ret = self._ce.CrosRunCommand(
         set_cpu_freq, machine=machine_name, chromeos_root=chromeos_root)
-    self.logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
-                           machine_name)
+    self.logger.LogFatalIf(
+        ret, 'Could not pin frequencies on machine: %s' % machine_name)
 
   def DecreaseWaitTime(self, machine_name, chromeos_root):
     """Change the ten seconds wait time for pagecycler to two seconds."""
@@ -218,11 +223,10 @@
       args_string = "test_args='%s'" % test_args
 
     cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
-           '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH, autotest_dir_arg,
-                                              fast_arg, label.board,
-                                              args_string, benchmark.run_local,
-                                              benchmark.test_name,
-                                              profiler_args, machine))
+           '{}" {} telemetry_Crosperf'.format(
+               TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
+               args_string, benchmark.run_local, benchmark.test_name,
+               profiler_args, machine))
 
     # Use --no-ns-pid so that cros_sdk does not create a different
     # process namespace and we can kill process created easily by their
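The set_cpu_freq string reworked earlier in this file now only disables Intel turbo and pins the performance governor; the per-CPU frequency pinning stays commented out. Below is a minimal standalone sketch of what that shell command does on the device, assuming the standard cpufreq and intel_pstate sysfs paths (run as root on the DUT); crosperf itself sends the equivalent as a single shell string over ssh via CrosRunCommand.

import glob
import os

NO_TURBO = '/sys/devices/system/cpu/intel_pstate/no_turbo'


def pin_performance_governor():
  # Disable Turbo in the Intel pstate driver, if the system exposes it.
  if os.path.exists(NO_TURBO):
    with open(NO_TURBO, 'w') as f:
      f.write('1')
  # Set the performance governor for every CPU.
  for cpufreq_dir in glob.glob('/sys/devices/system/cpu/cpu*/cpufreq'):
    with open(os.path.join(cpufreq_dir, 'scaling_governor'), 'w') as f:
      f.write('performance')


if __name__ == '__main__':
  pin_performance_governor()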
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index fd8de66..78bdfbd 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -28,10 +28,9 @@
   mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
   mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
   mock_logger = mock.Mock(spec=logger.Logger)
-  mock_label = label.MockLabel('lumpy', 'lumpy_chromeos_image', '',
-                               '/tmp/chromeos', 'lumpy',
-                               ['lumpy1.cros', 'lumpy.cros2'], '', '', False,
-                               'average', 'gcc', '')
+  mock_label = label.MockLabel(
+      'lumpy', 'lumpy_chromeos_image', '', '/tmp/chromeos', 'lumpy',
+      ['lumpy1.cros', 'lumpy.cros2'], '', '', False, 'average', 'gcc', '')
   telemetry_crosperf_bench = Benchmark(
       'b1_test',  # name
       'octane',  # test_name
@@ -72,9 +71,8 @@
     self.call_telemetry_run = False
 
   def setUp(self):
-    self.runner = suite_runner.SuiteRunner(self.mock_logger, 'verbose',
-                                           self.mock_cmd_exec,
-                                           self.mock_cmd_term)
+    self.runner = suite_runner.SuiteRunner(
+        self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
 
   def test_get_profiler_args(self):
     input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'
@@ -136,9 +134,9 @@
     self.assertTrue(self.call_telemetry_run)
     self.assertFalse(self.call_test_that_run)
     self.assertFalse(self.call_telemetry_crosperf_run)
-    self.assertEqual(
-        self.telemetry_run_args,
-        ['fake_machine', self.mock_label, self.telemetry_bench, ''])
+    self.assertEqual(self.telemetry_run_args, [
+        'fake_machine', self.mock_label, self.telemetry_bench, ''
+    ])
 
     reset()
     self.runner.Run(machine, self.mock_label, self.test_that_bench, test_args,
@@ -147,9 +145,9 @@
     self.assertFalse(self.call_telemetry_run)
     self.assertTrue(self.call_test_that_run)
     self.assertFalse(self.call_telemetry_crosperf_run)
-    self.assertEqual(
-        self.test_that_args,
-        ['fake_machine', self.mock_label, self.test_that_bench, '', ''])
+    self.assertEqual(self.test_that_args, [
+        'fake_machine', self.mock_label, self.test_that_bench, '', ''
+    ])
 
     reset()
     self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
@@ -171,21 +169,12 @@
     # pyformat: disable
     set_cpu_cmd = (
         'set -e && '
+        # Disable Turbo in Intel pstate driver
+        'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
+        'echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; fi; '
+        # Set governor to performance for each cpu
         'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
         'cd $f; '
-        'val=0; '
-        'if [[ -e scaling_available_frequencies ]]; then '
-        # pylint: disable=line-too-long
-        '  val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
-        'else '
-        '  val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
-        'set -- $val; '
-        'highest=$1; '
-        'if [[ $# -gt 1 ]]; then '
-        '  case $highest in *1000) highest=$2;; esac; '
-        'fi ;'
-        'echo $highest > scaling_max_freq; '
-        'echo $highest > scaling_min_freq; '
         'echo performance > scaling_governor; '
         'done'
     )
@@ -338,11 +327,12 @@
                                     self.telemetry_bench, '')
     self.assertEqual(res, 0)
     self.assertEqual(mock_runcmd.call_count, 1)
-    self.assertEqual(mock_runcmd.call_args_list[0][0], (
-        ('cd src/tools/perf && ./run_measurement '
-         '--browser=cros-chrome --output-format=csv '
-         '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
-         '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
+    self.assertEqual(
+        mock_runcmd.call_args_list[0][0],
+        (('cd src/tools/perf && ./run_measurement '
+          '--browser=cros-chrome --output-format=csv '
+          '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
+          '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
 
     self.real_logger.LogMsg = save_log_msg
 
diff --git a/file_lock_machine.py b/file_lock_machine.py
index 9b1d336..8493b08 100755
--- a/file_lock_machine.py
+++ b/file_lock_machine.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to lock/unlock machines."""
@@ -79,9 +79,11 @@
     return self.counter or self.exclusive
 
   def __str__(self):
-    return ' '.join(['Owner: %s' % self.owner, 'Exclusive: %s' % self.exclusive,
-                     'Counter: %s' % self.counter, 'Time: %s' % self.time,
-                     'Reason: %s' % self.reason, 'Auto: %s' % self.auto])
+    return ' '.join([
+        'Owner: %s' % self.owner, 'Exclusive: %s' % self.exclusive,
+        'Counter: %s' % self.counter, 'Time: %s' % self.time,
+        'Reason: %s' % self.reason, 'Auto: %s' % self.auto
+    ])
 
 
 class FileLock(object):
@@ -120,9 +122,8 @@
           (os.path.basename(file_lock.getFilePath),
            file_lock.getDescription().owner,
            file_lock.getDescription().exclusive,
-           file_lock.getDescription().counter,
-           elapsed_time, file_lock.getDescription().reason,
-           file_lock.getDescription().auto))
+           file_lock.getDescription().counter, elapsed_time,
+           file_lock.getDescription().reason, file_lock.getDescription().auto))
     table = '\n'.join(lock_strings)
     return '\n'.join([header, table])
 
@@ -199,8 +200,8 @@
     with FileLock(self._lock_file) as lock:
       if lock.exclusive:
         self._logger.LogError(
-            'Exclusive lock already acquired by %s. Reason: %s' %
-            (lock.owner, lock.reason))
+            'Exclusive lock already acquired by %s. Reason: %s' % (lock.owner,
+                                                                   lock.reason))
         return False
 
       if exclusive:
@@ -245,9 +246,10 @@
         lock.owner = ''
 
         if self._auto:
-          del_list = [i
-                      for i in FileLock.FILE_OPS
-                      if i.name == FileCheckName(self._lock_file)]
+          del_list = [
+              i for i in FileLock.FILE_OPS
+              if i.name == FileCheckName(self._lock_file)
+          ]
           for i in del_list:
             FileLock.FILE_OPS.remove(i)
           for f in del_list:
@@ -287,8 +289,7 @@
       if locked or not timeout >= 0:
         break
       print('Lock not acquired for {0}, wait {1} seconds ...'.format(
-          self._name,
-          sleep))
+          self._name, sleep))
       time.sleep(sleep)
       timeout -= sleep
     return locked
@@ -302,41 +303,43 @@
   """The main function."""
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('-r',
-                      '--reason',
-                      dest='reason',
-                      default='',
-                      help='The lock reason.')
-  parser.add_argument('-u',
-                      '--unlock',
-                      dest='unlock',
-                      action='store_true',
-                      default=False,
-                      help='Use this to unlock.')
-  parser.add_argument('-l',
-                      '--list_locks',
-                      dest='list_locks',
-                      action='store_true',
-                      default=False,
-                      help='Use this to list locks.')
-  parser.add_argument('-f',
-                      '--ignore_ownership',
-                      dest='ignore_ownership',
-                      action='store_true',
-                      default=False,
-                      help="Use this to force unlock on a lock you don't own.")
-  parser.add_argument('-s',
-                      '--shared',
-                      dest='shared',
-                      action='store_true',
-                      default=False,
-                      help='Use this for a shared (non-exclusive) lock.')
-  parser.add_argument('-d',
-                      '--dir',
-                      dest='locks_dir',
-                      action='store',
-                      default=Machine.LOCKS_DIR,
-                      help='Use this to set different locks_dir')
+  parser.add_argument(
+      '-r', '--reason', dest='reason', default='', help='The lock reason.')
+  parser.add_argument(
+      '-u',
+      '--unlock',
+      dest='unlock',
+      action='store_true',
+      default=False,
+      help='Use this to unlock.')
+  parser.add_argument(
+      '-l',
+      '--list_locks',
+      dest='list_locks',
+      action='store_true',
+      default=False,
+      help='Use this to list locks.')
+  parser.add_argument(
+      '-f',
+      '--ignore_ownership',
+      dest='ignore_ownership',
+      action='store_true',
+      default=False,
+      help="Use this to force unlock on a lock you don't own.")
+  parser.add_argument(
+      '-s',
+      '--shared',
+      dest='shared',
+      action='store_true',
+      default=False,
+      help='Use this for a shared (non-exclusive) lock.')
+  parser.add_argument(
+      '-d',
+      '--dir',
+      dest='locks_dir',
+      action='store',
+      default=Machine.LOCKS_DIR,
+      help='Use this to set different locks_dir')
   parser.add_argument('args', nargs='*', help='Machine arg.')
 
   options = parser.parse_args(argv)
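For reference, a hedged sketch of how the flags reformatted above combine in practice; the script path, machine name, and reason text are placeholders, not taken from this change.

import subprocess

MACHINE = 'chromeos-machine1'  # placeholder machine name

# Take an exclusive lock with a reason, list current locks, then release.
subprocess.check_call(
    ['./file_lock_machine.py', '-r', 'kernel testing', MACHINE])
subprocess.check_call(['./file_lock_machine.py', '-l'])
subprocess.check_call(['./file_lock_machine.py', '-u', MACHINE])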
diff --git a/generate-waterfall-reports.py b/generate-waterfall-reports.py
index ed8e369..8a80905 100755
--- a/generate-waterfall-reports.py
+++ b/generate-waterfall-reports.py
@@ -32,13 +32,9 @@
 from cros_utils import command_executer
 
 # All the test suites whose data we might want for the reports.
-TESTS = (
-    ('bvt-inline', 'HWTest'),
-    ('bvt-cq', 'HWTest'),
-    ('toolchain-tests', 'HWTest'),
-    ('security', 'HWTest'),
-    ('kernel_daily_regression', 'HWTest'),
-    ('kernel_daily_benchmarks', 'HWTest'),)
+TESTS = (('bvt-inline', 'HWTest'), ('bvt-cq', 'HWTest'), ('security', 'HWTest'),
+         ('kernel_daily_regression', 'HWTest'), ('kernel_daily_benchmarks',
+                                                 'HWTest'),)
 
 # The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
 # LISTED IN THE REPORT.
@@ -127,8 +123,8 @@
 
 def GetBuildID(build_bot, date):
   """Get the build id for a build_bot at a given date."""
-  day = '{day:02d}'.format(day=date%100)
-  mon = MONTHS[date/100%100]
+  day = '{day:02d}'.format(day=date % 100)
+  mon = MONTHS[date / 100 % 100]
   date_string = mon + ' ' + day
   if build_bot in WATERFALL_BUILDERS:
     url = 'https://uberchromegw.corp.google.com/i/chromeos/' + \
@@ -136,7 +132,7 @@
   if build_bot in ROTATING_BUILDERS:
     url = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver/' + \
           'builders/%s?numbuilds=200' % build_bot
-  command = 'sso_client %s' %url
+  command = 'sso_client %s' % url
   retval = 1
   retry_time = 3
   while retval and retry_time:
@@ -237,13 +233,13 @@
     out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' %
                    (waterfall_type, date_string))
     out_file.write('                                                          '
-                   '                          kernel       kernel\n')
+                   '                kernel       kernel\n')
     out_file.write('                         Build    bvt-         bvt-cq     '
-                   'toolchain-   security     daily        daily\n')
+                   ' security       daily        daily\n')
     out_file.write('                         status  inline                   '
-                   '  tests                 regression   benchmarks\n')
+                   '              regression   benchmarks\n')
     out_file.write('                               [P/ F/ DR]*   [P/ F /DR]*  '
-                   '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\n\n')
+                   '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\n\n')
 
     # Write daily waterfall status section.
     for i in range(0, len(report_list)):
@@ -262,9 +258,7 @@
       inline_color = build_dict.get('bvt-inline-color', '')
       cq_color = build_dict.get('bvt-cq-color', '')
       if 'x86' not in builder:
-        toolchain = build_dict.get('toolchain-tests', '[??/ ?? /??]')
         security = build_dict.get('security', '[??/ ?? /??]')
-        toolchain_color = build_dict.get('toolchain-tests-color', '')
         security_color = build_dict.get('security-color', '')
         if 'gcc' in builder:
           regression = build_dict.get('kernel_daily_regression', '[??/ ?? /??]')
@@ -272,20 +266,18 @@
           regression_color = build_dict.get('kernel_daily_regression-color', '')
           bench_color = build_dict.get('kernel_daily_benchmarks-color', '')
           out_file.write('                                  %6s        %6s'
-                         '       %6s      %6s      %6s      %6s\n' %
-                         (inline_color, cq_color, toolchain_color,
-                          security_color, regression_color, bench_color))
-          out_file.write('%25s %3s  %s %s %s %s %s %s\n' % (builder, status,
-                                                            inline, cq,
-                                                            toolchain, security,
-                                                            regression, bench))
+                         '      %6s      %6s      %6s\n' %
+                         (inline_color, cq_color, security_color,
+                          regression_color, bench_color))
+          out_file.write('%25s %3s  %s %s %s %s %s\n' %
+                         (builder, status, inline, cq, security, regression,
+                          bench))
         else:
           out_file.write('                                  %6s        %6s'
-                         '       %6s      %6s\n' % (inline_color, cq_color,
-                                                    toolchain_color,
-                                                    security_color))
-          out_file.write('%25s %3s  %s %s %s %s\n' % (builder, status, inline,
-                                                      cq, toolchain, security))
+                         '      %6s\n' % (inline_color, cq_color,
+                                          security_color))
+          out_file.write('%25s %3s  %s %s %s\n' % (builder, status, inline, cq,
+                                                   security))
       else:
         out_file.write('                                  %6s        %6s\n' %
                        (inline_color, cq_color))
@@ -372,8 +364,9 @@
     build_dict['date'] = report_date
 
   if 'board' in build_dict and build_dict['board'] != board:
-    raise RuntimeError('Error: Two different boards (%s,%s) in one build (%s)!'
-                       % (board, build_dict['board'], build_link))
+    raise RuntimeError(
+        'Error: Two different boards (%s,%s) in one build (%s)!' %
+        (board, build_dict['board'], build_link))
   build_dict['board'] = board
 
   color_key = '%s-color' % test
@@ -819,9 +812,8 @@
     EmailReport(main_report, 'Main', format_date(int_date))
     shutil.copy(main_report, ARCHIVE_DIR)
   if rotating_report_dict and not main_only and not failures_report:
-    rotating_report = GenerateWaterfallReport(rotating_report_dict,
-                                              failure_dict, 'rotating',
-                                              int_date, omit_failures)
+    rotating_report = GenerateWaterfallReport(
+        rotating_report_dict, failure_dict, 'rotating', int_date, omit_failures)
     EmailReport(rotating_report, 'Rotating', format_date(int_date))
     shutil.copy(rotating_report, ARCHIVE_DIR)
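The integer date handled by GetBuildID earlier in this file appears to encode a calendar date as YYYYMMDD. A small worked example of the day/month extraction follows; the MONTHS table here is a stand-in for the script's own table, which is not shown in this hunk.

# Assumes date is an integer of the form YYYYMMDD, as the arithmetic in
# GetBuildID implies; MONTHS is a hypothetical stand-in table.
MONTHS = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

date = 20170314
day = '{day:02d}'.format(day=date % 100)  # '14'
mon = MONTHS[date // 100 % 100]           # 'Mar' (the script uses '/', which
                                          # is integer division under python2)
print(mon + ' ' + day)                    # 'Mar 14'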
 
diff --git a/get_common_image_version.py b/get_common_image_version.py
index da36b98..4bb6949 100755
--- a/get_common_image_version.py
+++ b/get_common_image_version.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2013 Google Inc. All Rights Reserved.
 """Script to find list of common images (first beta releases) in Chromeos.
@@ -63,11 +63,12 @@
   """Get ChromeOS first betas list from history URL."""
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('--serialize',
-                      dest='serialize',
-                      default=None,
-                      help='Save list of common images into the specified '
-                      'file.')
+  parser.add_argument(
+      '--serialize',
+      dest='serialize',
+      default=None,
+      help='Save list of common images into the specified '
+      'file.')
   options = parser.parse_args(argv)
 
   try:
diff --git a/heat_map.py b/heat_map.py
index ae234b5..39e3f8f 100755
--- a/heat_map.py
+++ b/heat_map.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 # Copyright 2015 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -15,6 +15,7 @@
 
 from cros_utils import command_executer
 
+
 def IsARepoRoot(directory):
   """Returns True if directory is the root of a repo checkout."""
   return os.path.exists(os.path.join(directory, '.repo'))
@@ -37,8 +38,8 @@
     self.perf_report = ''
 
   def copyFileToChroot(self):
-    self.tempDir = tempfile.mkdtemp(
-        prefix=os.path.join(self.chromeos_root, 'src/'))
+    self.tempDir = tempfile.mkdtemp(prefix=os.path.join(self.chromeos_root,
+                                                        'src/'))
     self.temp_perf = os.path.join(self.tempDir, 'perf.data')
     shutil.copy2(self.perf_data, self.temp_perf)
     self.temp_perf_inchroot = os.path.join('~/trunk/src',
@@ -101,24 +102,25 @@
   """
   parser = argparse.ArgumentParser()
 
-  parser.add_argument('--chromeos_root',
-                      dest='chromeos_root',
-                      required=True,
-                      help='ChromeOS root to use for generate heatmaps.')
-  parser.add_argument('--perf_data',
-                      dest='perf_data',
-                      required=True,
-                      help='The raw perf data.')
-  parser.add_argument('--binary',
-                      dest='binary',
-                      required=False,
-                      help='The name of the binary.',
-                      default='chrome')
-  parser.add_argument('--page_size',
-                      dest='page_size',
-                      required=False,
-                      help='The page size for heat maps.',
-                      default=4096)
+  parser.add_argument(
+      '--chromeos_root',
+      dest='chromeos_root',
+      required=True,
+      help='ChromeOS root to use for generate heatmaps.')
+  parser.add_argument(
+      '--perf_data', dest='perf_data', required=True, help='The raw perf data.')
+  parser.add_argument(
+      '--binary',
+      dest='binary',
+      required=False,
+      help='The name of the binary.',
+      default='chrome')
+  parser.add_argument(
+      '--page_size',
+      dest='page_size',
+      required=False,
+      help='The page size for heat maps.',
+      default=4096)
   options = parser.parse_args(argv)
 
   if not IsARepoRoot(options.chromeos_root):
diff --git a/image_chromeos.py b/image_chromeos.py
index d95434a..f65ad4d 100755
--- a/image_chromeos.py
+++ b/image_chromeos.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2011 Google Inc. All Rights Reserved.
 """Script to image a ChromeOS device.
@@ -41,9 +41,8 @@
 
   # Check to see if remote machine has cherrypy, ctypes
   command = "python -c 'import cherrypy, ctypes'"
-  ret = cmd_executer.CrosRunCommand(command,
-                                    chromeos_root=chromeos_root,
-                                    machine=remote)
+  ret = cmd_executer.CrosRunCommand(
+      command, chromeos_root=chromeos_root, machine=remote)
   logger.GetLogger().LogFatalIf(
       ret == 255, 'Failed ssh to %s (for checking cherrypy)' % remote)
   logger.GetLogger().LogFatalIf(
@@ -51,41 +50,57 @@
       'cros flash cannot work.'.format(remote))
 
 
+def DisableCrosBeeps(chromeos_root, remote, log_level):
+  """Disable annoying chromebooks beeps after reboots."""
+  cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
+
+  command = '/usr/share/vboot/bin/set_gbb_flags.sh 0x1'
+  logger.GetLogger().LogOutput('Trying to disable beeping.')
+
+  ret, o, _ = cmd_executer.CrosRunCommandWOutput(
+      command, chromeos_root=chromeos_root, machine=remote)
+  if ret != 0:
+    logger.GetLogger().LogOutput(o)
+    logger.GetLogger().LogOutput('Failed to disable beeps.')
+
+
 def DoImage(argv):
   """Image ChromeOS."""
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      help='Target directory for ChromeOS installation.')
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      help='Target directory for ChromeOS installation.')
   parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
   parser.add_argument('-i', '--image', dest='image', help='Image binary file.')
-  parser.add_argument('-b',
-                      '--board',
-                      dest='board',
-                      help='Target board override.')
-  parser.add_argument('-f',
-                      '--force',
-                      dest='force',
-                      action='store_true',
-                      default=False,
-                      help='Force an image even if it is non-test.')
-  parser.add_argument('-n',
-                      '--no_lock',
-                      dest='no_lock',
-                      default=False,
-                      action='store_true',
-                      help='Do not attempt to lock remote before imaging.  '
-                      'This option should only be used in cases where the '
-                      'exclusive lock has already been acquired (e.g. in '
-                      'a script that calls this one).')
-  parser.add_argument('-l',
-                      '--logging_level',
-                      dest='log_level',
-                      default='verbose',
-                      help='Amount of logging to be used. Valid levels are '
-                      "'quiet', 'average', and 'verbose'.")
+  parser.add_argument(
+      '-b', '--board', dest='board', help='Target board override.')
+  parser.add_argument(
+      '-f',
+      '--force',
+      dest='force',
+      action='store_true',
+      default=False,
+      help='Force an image even if it is non-test.')
+  parser.add_argument(
+      '-n',
+      '--no_lock',
+      dest='no_lock',
+      default=False,
+      action='store_true',
+      help='Do not attempt to lock remote before imaging.  '
+      'This option should only be used in cases where the '
+      'exclusive lock has already been acquired (e.g. in '
+      'a script that calls this one).')
+  parser.add_argument(
+      '-l',
+      '--logging_level',
+      dest='log_level',
+      default='verbose',
+      help='Amount of logging to be used. Valid levels are '
+      "'quiet', 'average', and 'verbose'.")
   parser.add_argument('-a', '--image_args', dest='image_args')
 
   options = parser.parse_args(argv[1:])
@@ -148,9 +163,7 @@
 
       command = 'cat ' + checksum_file
       ret, device_checksum, _ = cmd_executer.CrosRunCommandWOutput(
-          command,
-          chromeos_root=options.chromeos_root,
-          machine=options.remote)
+          command, chromeos_root=options.chromeos_root, machine=options.remote)
 
       device_checksum = device_checksum.strip()
       image_checksum = str(image_checksum)
@@ -159,9 +172,8 @@
       l.LogOutput('Device checksum: ' + device_checksum)
 
       if image_checksum != device_checksum:
-        [found, located_image] = LocateOrCopyImage(options.chromeos_root,
-                                                   image,
-                                                   board=board)
+        [found, located_image] = LocateOrCopyImage(
+            options.chromeos_root, image, board=board)
 
         reimage = True
         l.LogOutput('Checksums do not match. Re-imaging...')
@@ -180,9 +192,8 @@
     if reimage:
       # If the device has /tmp mounted as noexec, image_to_live.sh can fail.
       command = 'mount -o remount,rw,exec /tmp'
-      cmd_executer.CrosRunCommand(command,
-                                  chromeos_root=options.chromeos_root,
-                                  machine=options.remote)
+      cmd_executer.CrosRunCommand(
+          command, chromeos_root=options.chromeos_root, machine=options.remote)
 
       real_src_dir = os.path.join(
           os.path.realpath(options.chromeos_root), 'src')
@@ -202,8 +213,13 @@
       # Check to see if cros flash will work for the remote machine.
       CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
 
-      cros_flash_args = ['cros', 'flash', '--board=%s' % board,
-                         '--clobber-stateful', options.remote]
+      # Disable the annoying chromebook beeps after reboot.
+      DisableCrosBeeps(options.chromeos_root, options.remote, log_level)
+
+      cros_flash_args = [
+          'cros', 'flash',
+          '--board=%s' % board, '--clobber-stateful', options.remote
+      ]
       if local_image:
         cros_flash_args.append(chroot_image)
       else:
@@ -220,9 +236,8 @@
       while True:
         if log_level == 'quiet':
           l.LogOutput('CMD : %s' % command)
-        ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
-                                            command,
-                                            command_timeout=1800)
+        ret = cmd_executer.ChrootRunCommand(
+            options.chromeos_root, command, command_timeout=1800)
         if ret == 0 or retries >= 2:
           break
         retries += 1
@@ -255,17 +270,15 @@
         if log_level == 'average':
           l.LogOutput('Verifying image.')
         command = 'echo %s > %s && chmod -w %s' % (image_checksum,
-                                                   checksum_file,
-                                                   checksum_file)
+                                                   checksum_file, checksum_file)
         ret = cmd_executer.CrosRunCommand(
             command,
             chromeos_root=options.chromeos_root,
             machine=options.remote)
         logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.')
 
-        successfully_imaged = VerifyChromeChecksum(options.chromeos_root,
-                                                   image, options.remote,
-                                                   log_level)
+        successfully_imaged = VerifyChromeChecksum(options.chromeos_root, image,
+                                                   options.remote, log_level)
         logger.GetLogger().LogFatalIf(not successfully_imaged,
                                       'Image verification failed!')
         TryRemountPartitionAsRW(options.chromeos_root, options.remote,
@@ -297,8 +310,7 @@
   images_list = glob.glob(images_glob)
   for potential_image in images_list:
     if filecmp.cmp(potential_image, image):
-      l.LogOutput('Found matching image %s in chromeos_root.' %
-                  potential_image)
+      l.LogOutput('Found matching image %s in chromeos_root.' % potential_image)
       return [True, potential_image]
   # We did not find an image. Copy it in the src dir and return the copied
   # file.
@@ -321,9 +333,9 @@
                    './mount_gpt_image.sh --from=%s --image=%s'
                    ' --safe --read_only'
                    ' --rootfs_mountpt=%s'
-                   ' --stateful_mountpt=%s' % (chromeos_root, image_dir,
-                                               image_file, rootfs_mp,
-                                               stateful_mp))
+                   ' --stateful_mountpt=%s' %
+                   (chromeos_root, image_dir, image_file, rootfs_mp,
+                    stateful_mp))
   return mount_command
 
 
@@ -351,12 +363,8 @@
   lsb_release_file = os.path.join(rootfs_mp, 'etc/lsb-release')
   lsb_release_contents = open(lsb_release_file).read()
   is_test_image = re.search('test', lsb_release_contents, re.IGNORECASE)
-  MountImage(chromeos_root,
-             image,
-             rootfs_mp,
-             stateful_mp,
-             log_level,
-             unmount=True)
+  MountImage(
+      chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True)
   return is_test_image
 
 
@@ -365,20 +373,14 @@
   rootfs_mp = tempfile.mkdtemp()
   stateful_mp = tempfile.mkdtemp()
   MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level)
-  image_chrome_checksum = FileUtils().Md5File('%s/opt/google/chrome/chrome' %
-                                              rootfs_mp,
-                                              log_level=log_level)
-  MountImage(chromeos_root,
-             image,
-             rootfs_mp,
-             stateful_mp,
-             log_level,
-             unmount=True)
+  image_chrome_checksum = FileUtils().Md5File(
+      '%s/opt/google/chrome/chrome' % rootfs_mp, log_level=log_level)
+  MountImage(
+      chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True)
 
   command = 'md5sum /opt/google/chrome/chrome'
-  [_, o, _] = cmd_executer.CrosRunCommandWOutput(command,
-                                                 chromeos_root=chromeos_root,
-                                                 machine=remote)
+  [_, o, _] = cmd_executer.CrosRunCommandWOutput(
+      command, chromeos_root=chromeos_root, machine=remote)
   device_chrome_checksum = o.split()[0]
   if image_chrome_checksum.strip() == device_chrome_checksum.strip():
     return True
@@ -415,12 +417,11 @@
   while True:
     current_time = time.time()
     if current_time - start_time > timeout:
-      l.LogError('Timeout of %ss reached. Machine still not up. Aborting.' %
-                 timeout)
+      l.LogError(
+          'Timeout of %ss reached. Machine still not up. Aborting.' % timeout)
       return False
-    ret = cmd_executer.CrosRunCommand(command,
-                                      chromeos_root=chromeos_root,
-                                      machine=remote)
+    ret = cmd_executer.CrosRunCommand(
+        command, chromeos_root=chromeos_root, machine=remote)
     if not ret:
       return True
 
diff --git a/produce_output.py b/produce_output.py
index 30deea8..46512c4 100755
--- a/produce_output.py
+++ b/produce_output.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
-"""This simulates a real job by producing a lot of output.
-
-"""
+"""This simulates a real job by producing a lot of output."""
 
 from __future__ import print_function
 
diff --git a/remote_gcc_build.py b/remote_gcc_build.py
index 52cedfb..edd0d2b 100755
--- a/remote_gcc_build.py
+++ b/remote_gcc_build.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 
 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
@@ -28,6 +28,7 @@
 
 # pylint: disable=anomalous-backslash-in-string
 
+
 def GetPatchNum(output):
   lines = output.splitlines()
   line = [l for l in lines if 'googlesource' in l][0]
@@ -137,16 +138,18 @@
   print(str(rversion))
   #  ls_cmd = ("gsutil ls gs://chromeos-image-archive/trybot-{0}/{1}-b{2}"
   #            .format(target, rversion, index))
-  ls_cmd = ('gsutil ls gs://chromeos-image-archive/trybot-{0}/*-b{2}'
-            .format(target, index))
+  ls_cmd = ('gsutil ls gs://chromeos-image-archive/trybot-{0}/*-b{1}'.format(
+      target, index))
 
   download_cmd = ('$(which gsutil) cp {0} {1}'.format('{0}', dest))
   ce = command_executer.GetCommandExecuter()
 
   _, out, _ = ce.RunCommandWOutput(ls_cmd, print_to_console=True)
   lines = out.splitlines()
-  download_files = ['autotest.tar', 'chromeos-chrome', 'chromiumos_test_image',
-                    'debug.tgz', 'sysroot_chromeos-base_chromeos-chrome.tar.xz']
+  download_files = [
+      'autotest.tar', 'chromeos-chrome', 'chromiumos_test_image', 'debug.tgz',
+      'sysroot_chromeos-base_chromeos-chrome.tar.xz'
+  ]
   for line in lines:
     if any([e in line for e in download_files]):
       cmd = download_cmd.format(line)
@@ -163,8 +166,8 @@
               'tar xjf {1} -C {0} &&'
               'tar xzf {0}/debug.tgz  -C {0}/usr/lib/ &&'
               'tar xf {0}/autotest.tar -C {0}/usr/local/ &&'
-              'tar xJf {0}/chromiumos_test_image.tar.xz -C {0}'
-              .format(dest, chrome_tbz2))
+              'tar xJf {0}/chromiumos_test_image.tar.xz -C {0}'.format(
+                  dest, chrome_tbz2))
   ce = command_executer.GetCommandExecuter()
   return ce.RunCommand(commands)
 
@@ -220,8 +223,8 @@
   """Up load patch to gerrit, return patch number."""
   commands = ('git add -A . &&'
               "git commit -m 'test' -m 'BUG=None' -m 'TEST=None' "
-              "-m 'hostname={0}' -m 'source={1}'"
-              .format(socket.gethostname(), source))
+              "-m 'hostname={0}' -m 'source={1}'".format(
+                  socket.gethostname(), source))
   ce = command_executer.GetCommandExecuter()
   ce.RunCommand(commands)
 
@@ -335,59 +338,64 @@
   """The main function."""
   # Common initializations
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      required=True,
-                      dest='chromeos_root',
-                      help='The chromeos_root')
-  parser.add_argument('-g',
-                      '--gcc_dir',
-                      default='',
-                      dest='gcc_dir',
-                      help='The gcc dir')
-  parser.add_argument('-t',
-                      '--target',
-                      required=True,
-                      dest='target',
-                      help=('The target to be build, the list is at'
-                            ' $(chromeos_root)/chromite/buildbot/cbuildbot'
-                            ' --list -all'))
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      required=True,
+      dest='chromeos_root',
+      help='The chromeos_root')
+  parser.add_argument(
+      '-g', '--gcc_dir', default='', dest='gcc_dir', help='The gcc dir')
+  parser.add_argument(
+      '-t',
+      '--target',
+      required=True,
+      dest='target',
+      help=('The target to be built, the list is at'
+            ' $(chromeos_root)/chromite/buildbot/cbuildbot'
+            ' --list -all'))
   parser.add_argument('-l', '--local', action='store_true')
-  parser.add_argument('-d',
-                      '--dest_dir',
-                      dest='dest_dir',
-                      help=('The dir to build the whole chromeos if'
-                            ' --local is set'))
-  parser.add_argument('--chrome_version',
-                      dest='chrome_version',
-                      default='',
-                      help='The chrome version to use. '
-                      'Default it will use the latest one.')
-  parser.add_argument('--chromeos_version',
-                      dest='chromeos_version',
-                      default='',
-                      help=('The chromeos version to use.'
-                            '(1) A release version in the format: '
-                            "'\d+\.\d+\.\d+\.\d+.*'"
-                            "(2) 'latest_lkgm' for the latest lkgm version"))
-  parser.add_argument('-r',
-                      '--replace_sysroot',
-                      action='store_true',
-                      help=('Whether or not to replace the build/$board dir'
-                            'under the chroot of chromeos_root and copy '
-                            'the image to src/build/image/$board/latest.'
-                            ' Default is False'))
-  parser.add_argument('-b',
-                      '--branch',
-                      dest='branch',
-                      default='',
-                      help=('The branch to run trybot, default is None'))
-  parser.add_argument('-p',
-                      '--patch',
-                      dest='patch',
-                      default='',
-                      help=('The patches to be applied, the patches numbers '
-                            "be seperated by ','"))
+  parser.add_argument(
+      '-d',
+      '--dest_dir',
+      dest='dest_dir',
+      help=('The dir to build the whole chromeos if'
+            ' --local is set'))
+  parser.add_argument(
+      '--chrome_version',
+      dest='chrome_version',
+      default='',
+      help='The chrome version to use. '
+      'By default it will use the latest one.')
+  parser.add_argument(
+      '--chromeos_version',
+      dest='chromeos_version',
+      default='',
+      help=('The chromeos version to use. '
+            '(1) A release version in the format: '
+            "'\d+\.\d+\.\d+\.\d+.*'"
+            "(2) 'latest_lkgm' for the latest lkgm version"))
+  parser.add_argument(
+      '-r',
+      '--replace_sysroot',
+      action='store_true',
+      help=('Whether or not to replace the build/$board dir '
+            'under the chroot of chromeos_root and copy '
+            'the image to src/build/image/$board/latest.'
+            ' Default is False'))
+  parser.add_argument(
+      '-b',
+      '--branch',
+      dest='branch',
+      default='',
+      help=('The branch to run trybot, default is None'))
+  parser.add_argument(
+      '-p',
+      '--patch',
+      dest='patch',
+      default='',
+      help=('The patches to be applied, the patch numbers '
+            "separated by ','"))
 
   script_dir = os.path.dirname(os.path.realpath(__file__))
 
@@ -400,7 +408,7 @@
   chromeos_root = misc.CanonicalizePath(args.chromeos_root)
   if args.chromeos_version and args.branch:
     raise RuntimeError('You can not set chromeos_version and branch at the '
-                      'same time.')
+                       'same time.')
 
   manifests = None
   if args.branch:
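The artifact-fetch logic reformatted earlier in remote_gcc_build.py lists a trybot bucket and copies only the files it needs; the script does this through command_executer. A rough standalone sketch under assumed placeholder values (the trybot target, build index, and destination directory are not taken from this change):

import subprocess

target = 'daisy-paladin'  # placeholder trybot target
index = 1                 # placeholder build index
dest = '/tmp/gcc_build'   # placeholder download directory

download_files = [
    'autotest.tar', 'chromeos-chrome', 'chromiumos_test_image', 'debug.tgz',
    'sysroot_chromeos-base_chromeos-chrome.tar.xz'
]

listing = subprocess.check_output(
    'gsutil ls gs://chromeos-image-archive/trybot-%s/*-b%s' % (target, index),
    shell=True)
for line in listing.splitlines():
  if any(name in line for name in download_files):
    subprocess.check_call('gsutil cp %s %s' % (line, dest), shell=True)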
diff --git a/remote_kill_test.py b/remote_kill_test.py
index 71a6690..e0f29d0 100755
--- a/remote_kill_test.py
+++ b/remote_kill_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to wrap test_that script.
@@ -26,14 +26,13 @@
 
 def Main(argv):
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      help='ChromeOS root checkout directory')
-  parser.add_argument('-r',
-                      '--remote',
-                      dest='remote',
-                      help='Remote chromeos device.')
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      help='ChromeOS root checkout directory')
+  parser.add_argument(
+      '-r', '--remote', dest='remote', help='Remote chromeos device.')
 
   _ = parser.parse_args(argv)
   ce = command_executer.GetCommandExecuter()
diff --git a/remote_test.py b/remote_test.py
index 82f54ed..62598d5 100755
--- a/remote_test.py
+++ b/remote_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to wrap test_that script.
@@ -26,14 +26,13 @@
 
 def Main(argv):
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      help='ChromeOS root checkout directory')
-  parser.add_argument('-r',
-                      '--remote',
-                      dest='remote',
-                      help='Remote chromeos device.')
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      help='ChromeOS root checkout directory')
+  parser.add_argument(
+      '-r', '--remote', dest='remote', help='Remote chromeos device.')
   options = parser.parse_args(argv)
   if options.chromeos_root is None:
     Usage(parser, 'chromeos_root must be given')
@@ -45,50 +44,55 @@
 
   command = 'ls -lt /'
   ce = command_executer.GetCommandExecuter()
-  ce.CrosRunCommand(command,
-                    chromeos_root=options.chromeos_root,
-                    machine=options.remote)
+  ce.CrosRunCommand(
+      command, chromeos_root=options.chromeos_root, machine=options.remote)
 
   version_dir_path, script_name = misc.GetRoot(sys.argv[0])
   version_dir = misc.GetRoot(version_dir_path)[1]
 
   # Tests to copy directories and files to the chromeos box.
-  ce.CopyFiles(version_dir_path,
-               '/tmp/' + version_dir,
-               dest_machine=options.remote,
-               dest_cros=True,
-               chromeos_root=options.chromeos_root)
-  ce.CopyFiles(version_dir_path,
-               '/tmp/' + version_dir + '1',
-               dest_machine=options.remote,
-               dest_cros=True,
-               chromeos_root=options.chromeos_root)
-  ce.CopyFiles(sys.argv[0],
-               '/tmp/' + script_name,
-               recursive=False,
-               dest_machine=options.remote,
-               dest_cros=True,
-               chromeos_root=options.chromeos_root)
-  ce.CopyFiles(sys.argv[0],
-               '/tmp/' + script_name + '1',
-               recursive=False,
-               dest_machine=options.remote,
-               dest_cros=True,
-               chromeos_root=options.chromeos_root)
+  ce.CopyFiles(
+      version_dir_path,
+      '/tmp/' + version_dir,
+      dest_machine=options.remote,
+      dest_cros=True,
+      chromeos_root=options.chromeos_root)
+  ce.CopyFiles(
+      version_dir_path,
+      '/tmp/' + version_dir + '1',
+      dest_machine=options.remote,
+      dest_cros=True,
+      chromeos_root=options.chromeos_root)
+  ce.CopyFiles(
+      sys.argv[0],
+      '/tmp/' + script_name,
+      recursive=False,
+      dest_machine=options.remote,
+      dest_cros=True,
+      chromeos_root=options.chromeos_root)
+  ce.CopyFiles(
+      sys.argv[0],
+      '/tmp/' + script_name + '1',
+      recursive=False,
+      dest_machine=options.remote,
+      dest_cros=True,
+      chromeos_root=options.chromeos_root)
 
   # Test to copy directories and files from the chromeos box.
-  ce.CopyFiles('/tmp/' + script_name,
-               '/tmp/hello',
-               recursive=False,
-               src_machine=options.remote,
-               src_cros=True,
-               chromeos_root=options.chromeos_root)
-  ce.CopyFiles('/tmp/' + script_name,
-               '/tmp/' + script_name,
-               recursive=False,
-               src_machine=options.remote,
-               src_cros=True,
-               chromeos_root=options.chromeos_root)
+  ce.CopyFiles(
+      '/tmp/' + script_name,
+      '/tmp/hello',
+      recursive=False,
+      src_machine=options.remote,
+      src_cros=True,
+      chromeos_root=options.chromeos_root)
+  ce.CopyFiles(
+      '/tmp/' + script_name,
+      '/tmp/' + script_name,
+      recursive=False,
+      src_machine=options.remote,
+      src_cros=True,
+      chromeos_root=options.chromeos_root)
   board = ce.CrosLearnBoard(options.chromeos_root, options.remote)
   print(board)
   return 0
diff --git a/repo_to_repo.py b/repo_to_repo.py
index 3b3b9bc..91c5d58 100755
--- a/repo_to_repo.py
+++ b/repo_to_repo.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Module for transferring files between various types of repositories."""
@@ -23,6 +23,7 @@
 
 # pylint: disable=anomalous-backslash-in-string
 
+
 def GetCanonicalMappings(mappings):
   canonical_mappings = []
   for mapping in mappings:
@@ -101,8 +102,8 @@
     return self._ce.RunCommand(command)
 
   def __str__(self):
-    return '\n'.join(str(s)
-                     for s in [self.repo_type, self.address, self.mappings])
+    return '\n'.join(
+        str(s) for s in [self.repo_type, self.address, self.mappings])
 
 
 # Note - this type of repo is used only for "readonly", in other words, this
@@ -130,7 +131,6 @@
 class P4Repo(Repo):
   """Class for P4 repositories."""
 
-
   def __init__(self, address, mappings, revision=None):
     Repo.__init__(self)
     self.repo_type = 'p4'
@@ -143,9 +143,8 @@
     client_name += tempfile.mkstemp()[1].replace('/', '-')
     mappings = self.mappings
     p4view = perforce.View('depot2', GetCanonicalMappings(mappings))
-    p4client = perforce.CommandsFactory(self._root_dir,
-                                        p4view,
-                                        name=client_name)
+    p4client = perforce.CommandsFactory(
+        self._root_dir, p4view, name=client_name)
     command = p4client.SetupAndDo(p4client.Sync(self.revision))
     ret = self._ce.RunCommand(command)
     assert ret == 0, 'Could not setup client.'
@@ -225,16 +224,16 @@
   def SetupForPush(self):
     with misc.WorkingDirectory(self._root_dir):
       ret = self._CloneSources()
-      logger.GetLogger().LogFatalIf(ret, 'Could not clone git repo %s.' %
-                                    self.address)
+      logger.GetLogger().LogFatalIf(
+          ret, 'Could not clone git repo %s.' % self.address)
 
       command = 'git branch -a | grep -wq %s' % self.branch
       ret = self._ce.RunCommand(command)
 
       if ret == 0:
         if self.branch != 'master':
-          command = ('git branch --track %s remotes/origin/%s' %
-                     (self.branch, self.branch))
+          command = ('git branch --track %s remotes/origin/%s' % (self.branch,
+                                                                  self.branch))
         else:
           command = 'pwd'
         command += '&& git checkout %s' % self.branch
@@ -270,8 +269,8 @@
       if self.gerrit:
         label = 'somelabel'
         command = 'git remote add %s %s' % (label, self.address)
-        command += ('&& git push %s %s HEAD:refs/for/master' %
-                    (push_args, label))
+        command += ('&& git push %s %s HEAD:refs/for/master' % (push_args,
+                                                                label))
       else:
         command = 'git push -v %s origin %s:%s' % (push_args, self.branch,
                                                    self.branch)
@@ -334,11 +333,12 @@
     elif repo_type == 'svn':
       repo = SvnRepo(repo_address, repo_mappings)
     elif repo_type == 'git':
-      repo = GitRepo(repo_address,
-                     repo_branch,
-                     mappings=repo_mappings,
-                     ignores=repo_ignores,
-                     gerrit=gerrit)
+      repo = GitRepo(
+          repo_address,
+          repo_branch,
+          mappings=repo_mappings,
+          ignores=repo_ignores,
+          gerrit=gerrit)
     elif repo_type == 'file':
       repo = FileRepo(repo_address)
     else:
@@ -349,24 +349,27 @@
 @logger.HandleUncaughtExceptions
 def Main(argv):
   parser = argparse.ArgumentParser()
-  parser.add_argument('-i',
-                      '--input_file',
-                      dest='input_file',
-                      help='The input file that contains repo descriptions.')
+  parser.add_argument(
+      '-i',
+      '--input_file',
+      dest='input_file',
+      help='The input file that contains repo descriptions.')
 
-  parser.add_argument('-n',
-                      '--dry_run',
-                      dest='dry_run',
-                      action='store_true',
-                      default=False,
-                      help='Do a dry run of the push.')
+  parser.add_argument(
+      '-n',
+      '--dry_run',
+      dest='dry_run',
+      action='store_true',
+      default=False,
+      help='Do a dry run of the push.')
 
-  parser.add_argument('-F',
-                      '--message_file',
-                      dest='message_file',
-                      default=None,
-                      help=('Use contents of the log file as the commit '
-                            'message.'))
+  parser.add_argument(
+      '-F',
+      '--message_file',
+      dest='message_file',
+      default=None,
+      help=('Use contents of the log file as the commit '
+            'message.'))
 
   options = parser.parse_args(argv)
   if not options.input_file:
@@ -401,9 +404,10 @@
 
   commit_message = 'Synced repos to: %s' % ','.join(input_revisions)
   for output_repo in output_repos:
-    ret = output_repo.PushSources(commit_message=commit_message,
-                                  dry_run=options.dry_run,
-                                  message_file=options.message_file)
+    ret = output_repo.PushSources(
+        commit_message=commit_message,
+        dry_run=options.dry_run,
+        message_file=options.message_file)
     if ret:
       return ret
 
diff --git a/run_tests.py b/run_tests.py
index c755278..e1b8ca2 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to wrap run_remote_tests.sh script.
@@ -10,9 +10,9 @@
 
 __author__ = 'asharif@google.com (Ahmad Sharif)'
 
-
 import sys
 
+
 def Main():
   """The main function."""
   print('This script is deprecated.  Use crosperf for running tests.')
diff --git a/setup_chromeos.py b/setup_chromeos.py
index b6f9f4d..0b51d83 100755
--- a/setup_chromeos.py
+++ b/setup_chromeos.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
@@ -99,13 +99,15 @@
 def Main(argv):
   """Checkout the ChromeOS source."""
   parser = argparse.ArgumentParser()
-  parser.add_argument('--dir',
-                      dest='directory',
-                      help='Target directory for ChromeOS installation.')
-  parser.add_argument('--version',
-                      dest='version',
-                      default='latest_lkgm',
-                      help="""ChromeOS version. Can be:
+  parser.add_argument(
+      '--dir',
+      dest='directory',
+      help='Target directory for ChromeOS installation.')
+  parser.add_argument(
+      '--version',
+      dest='version',
+      default='latest_lkgm',
+      help="""ChromeOS version. Can be:
 (1) A release version in the format: 'X.X.X.X'
 (2) 'top' for top of trunk
 (3) 'latest_lkgm' for the latest lkgm version
@@ -113,31 +115,32 @@
 (5) 'latest_common' for the latest team common stable version
 (6) 'common' for the team common stable version before timestamp
 Default is 'latest_lkgm'.""")
-  parser.add_argument('--timestamp',
-                      dest='timestamp',
-                      default=None,
-                      help='Timestamps in epoch format. It will check out the'
-                      'latest LKGM or the latest COMMON version of ChromeOS'
-                      ' before the timestamp. Use in combination with'
-                      ' --version=latest or --version=common. Use '
-                      '"date -d <date string> +%s" to find epoch time')
-  parser.add_argument('--minilayout',
-                      dest='minilayout',
-                      default=False,
-                      action='store_true',
-                      help='Whether to checkout the minilayout (smaller '
-                      'checkout).')
-  parser.add_argument('--jobs',
-                      '-j',
-                      dest='jobs',
-                      help='Number of repo sync threads to use.')
-  parser.add_argument('--public',
-                      '-p',
-                      dest='public',
-                      default=False,
-                      action='store_true',
-                      help='Use the public checkout instead of the private '
-                      'one.')
+  parser.add_argument(
+      '--timestamp',
+      dest='timestamp',
+      default=None,
+      help='Timestamps in epoch format. It will check out the '
+      'latest LKGM or the latest COMMON version of ChromeOS'
+      ' before the timestamp. Use in combination with'
+      ' --version=latest or --version=common. Use '
+      '"date -d <date string> +%s" to find epoch time')
+  parser.add_argument(
+      '--minilayout',
+      dest='minilayout',
+      default=False,
+      action='store_true',
+      help='Whether to checkout the minilayout (smaller '
+      'checkout).')
+  parser.add_argument(
+      '--jobs', '-j', dest='jobs', help='Number of repo sync threads to use.')
+  parser.add_argument(
+      '--public',
+      '-p',
+      dest='public',
+      default=False,
+      action='store_true',
+      help='Use the public checkout instead of the private '
+      'one.')
 
   options = parser.parse_args(argv)
 
@@ -167,20 +170,16 @@
     versions_repo = ('https://chromium.googlesource.com/'
                      'chromiumos/manifest-versions.git')
   else:
-    manifest_repo = (
-        'https://chrome-internal.googlesource.com/chromeos/'
-        'manifest-internal.git'
-    )
-    versions_repo = (
-        'https://chrome-internal.googlesource.com/chromeos/'
-        'manifest-versions.git'
-    )
+    manifest_repo = ('https://chrome-internal.googlesource.com/chromeos/'
+                     'manifest-internal.git')
+    versions_repo = ('https://chrome-internal.googlesource.com/chromeos/'
+                     'manifest-versions.git')
 
   if version == 'top':
     init = 'repo init -u %s' % manifest_repo
   elif version == 'latest_lkgm':
     manifests = manifest_versions.ManifestVersions()
-    version = manifests.TimeToVersion(time.mktime(time.gmtime()))
+    version = manifests.TimeToVersionChromeOS(time.mktime(time.gmtime()))
     version, manifest = version.split('.', 1)
     logger.GetLogger().LogOutput('found version %s.%s for latest LKGM' %
                                  (version, manifest))
@@ -194,8 +193,9 @@
     manifests = manifest_versions.ManifestVersions()
     version = manifests.TimeToVersion(timestamp)
     version, manifest = version.split('.', 1)
-    logger.GetLogger().LogOutput('found version %s.%s for LKGM at timestamp %s'
-                                 % (version, manifest, timestamp))
+    logger.GetLogger().LogOutput(
+        'found version %s.%s for LKGM at timestamp %s' % (version, manifest,
+                                                          timestamp))
     init = ('repo init -u %s -m paladin/buildspecs/%s/%s.xml' %
             (versions_repo, version, manifest))
     del manifests
diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py
index 573e558..d919c96 100755
--- a/tc_enter_chroot.py
+++ b/tc_enter_chroot.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script to enter the ChromeOS chroot with mounted sources.
@@ -86,41 +86,47 @@
   """The main function."""
 
   parser = argparse.ArgumentParser()
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      default='../..',
-                      help='ChromeOS root checkout directory.')
-  parser.add_argument('-t',
-                      '--toolchain_root',
-                      dest='toolchain_root',
-                      help='Toolchain root directory.')
-  parser.add_argument('-o',
-                      '--output',
-                      dest='output',
-                      help='Toolchain output directory')
-  parser.add_argument('--sudo',
-                      dest='sudo',
-                      action='store_true',
-                      default=False,
-                      help='Run the command with sudo.')
-  parser.add_argument('-r',
-                      '--third_party',
-                      dest='third_party',
-                      help='The third_party directory to mount.')
-  parser.add_argument('-m',
-                      '--other_mounts',
-                      dest='other_mounts',
-                      help='Other mount points in the form: '
-                      'dir:mounted_dir:options')
-  parser.add_argument('-s',
-                      '--mount-scripts-only',
-                      dest='mount_scripts_only',
-                      action='store_true',
-                      default=False,
-                      help='Mount only the scripts dir, and not the sources.')
-  parser.add_argument('passthrough_argv', nargs='*',
-                      help='Command to be executed inside the chroot.')
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      default='../..',
+      help='ChromeOS root checkout directory.')
+  parser.add_argument(
+      '-t',
+      '--toolchain_root',
+      dest='toolchain_root',
+      help='Toolchain root directory.')
+  parser.add_argument(
+      '-o', '--output', dest='output', help='Toolchain output directory')
+  parser.add_argument(
+      '--sudo',
+      dest='sudo',
+      action='store_true',
+      default=False,
+      help='Run the command with sudo.')
+  parser.add_argument(
+      '-r',
+      '--third_party',
+      dest='third_party',
+      help='The third_party directory to mount.')
+  parser.add_argument(
+      '-m',
+      '--other_mounts',
+      dest='other_mounts',
+      help='Other mount points in the form: '
+      'dir:mounted_dir:options')
+  parser.add_argument(
+      '-s',
+      '--mount-scripts-only',
+      dest='mount_scripts_only',
+      action='store_true',
+      default=False,
+      help='Mount only the scripts dir, and not the sources.')
+  parser.add_argument(
+      'passthrough_argv',
+      nargs='*',
+      help='Command to be executed inside the chroot.')
 
   options = parser.parse_args(argv)
 
@@ -137,8 +143,10 @@
     m = 'toolchain_root not specified. Will not mount toolchain dirs.'
     logger.GetLogger().LogWarning(m)
   else:
-    tc_dirs = [options.toolchain_root + '/google_vendor_src_branch/gcc',
-               options.toolchain_root + '/google_vendor_src_branch/binutils']
+    tc_dirs = [
+        options.toolchain_root + '/google_vendor_src_branch/gcc',
+        options.toolchain_root + '/google_vendor_src_branch/binutils'
+    ]
 
   for tc_dir in tc_dirs:
     if not os.path.exists(tc_dir):
@@ -154,9 +162,9 @@
     sys.exit(1)
 
   if not os.path.exists(chromeos_root + '/src/scripts/build_packages'):
-    logger.GetLogger(
-    ).LogError(options.chromeos_root + '/src/scripts/build_packages'
-               ' not found!')
+    logger.GetLogger().LogError(options.chromeos_root +
+                                '/src/scripts/build_packages'
+                                ' not found!')
     parser.print_help()
     sys.exit(1)
 
@@ -176,16 +184,16 @@
   # Add the third_party mount point if it exists
   if options.third_party:
     third_party_dir = options.third_party
-    logger.GetLogger().LogFatalIf(
-        not os.path.isdir(third_party_dir),
-        '--third_party option is not a valid dir.')
+    logger.GetLogger().LogFatalIf(not os.path.isdir(third_party_dir),
+                                  '--third_party option is not a valid dir.')
   else:
-    third_party_dir = os.path.abspath('%s/../../../third_party' %
-                                      os.path.dirname(__file__))
+    third_party_dir = os.path.abspath(
+        '%s/../../../third_party' % os.path.dirname(__file__))
 
   if os.path.isdir(third_party_dir):
-    mount_point = MountPoint(third_party_dir, ('%s/%s' % (
-        full_mounted_tc_root, os.path.basename(third_party_dir))),
+    mount_point = MountPoint(third_party_dir,
+                             ('%s/%s' % (full_mounted_tc_root,
+                                         os.path.basename(third_party_dir))),
                              getpass.getuser())
     mount_points.append(mount_point)
 
@@ -195,8 +203,8 @@
     output = options.toolchain_root + '/output'
 
   if output:
-    mount_points.append(MountPoint(output, full_mounted_tc_root + '/output',
-                                   getpass.getuser()))
+    mount_points.append(
+        MountPoint(output, full_mounted_tc_root + '/output', getpass.getuser()))
 
   # Mount the other mount points
   mount_points += CreateMountPointsFromString(options.other_mounts,
@@ -235,16 +243,16 @@
       inner_command = inner_command[3:]
     command_file = 'tc_enter_chroot.cmd'
     command_file_path = chromeos_root + '/src/scripts/' + command_file
-    retv = command_executer.GetCommandExecuter().RunCommand('sudo rm -f ' +
-                                                            command_file_path)
+    retv = command_executer.GetCommandExecuter().RunCommand(
+        'sudo rm -f ' + command_file_path)
     if retv != 0:
       return retv
     f = open(command_file_path, 'w')
     f.write(inner_command)
     f.close()
     logger.GetLogger().LogCmd(inner_command)
-    retv = command_executer.GetCommandExecuter().RunCommand('chmod +x ' +
-                                                            command_file_path)
+    retv = command_executer.GetCommandExecuter().RunCommand(
+        'chmod +x ' + command_file_path)
     if retv != 0:
       return retv
 
diff --git a/test_gcc_dejagnu.py b/test_gcc_dejagnu.py
index 41304a0..cd2e0cd 100755
--- a/test_gcc_dejagnu.py
+++ b/test_gcc_dejagnu.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2010 Google Inc. All Rights Reserved.
 """Script adapter used by automation client for testing dejagnu.
@@ -41,8 +41,10 @@
     self._cleanup = cleanup
 
   def SetupChromeOS(self):
-    cmd = [setup_chromeos.__file__, '--dir=' + self._chromeos_root,
-           '--minilayout', '--jobs=8']
+    cmd = [
+        setup_chromeos.__file__, '--dir=' + self._chromeos_root, '--minilayout',
+        '--jobs=8'
+    ]
     ret = setup_chromeos.Main(cmd)
     if ret:
       raise RuntimeError('Failed to checkout chromeos')
@@ -54,9 +56,8 @@
 
   def SetupBoard(self):
     cmd = './setup_board --board=' + self._board
-    ret = self._cmd_exec.ChrootRunCommand(self._chromeos_root,
-                                          cmd,
-                                          terminated_timeout=4000)
+    ret = self._cmd_exec.ChrootRunCommand(
+        self._chromeos_root, cmd, terminated_timeout=4000)
     if ret:
       raise RuntimeError('Failed to setup board.')
 
@@ -73,17 +74,20 @@
     ret = self._cmd_exec.RunCommand(cmd)
 
   def BuildGCC(self):
-    build_gcc_args = [build_tc.__file__, '--board=' + self._board,
-                      '--chromeos_root=' + self._chromeos_root,
-                      '--gcc_dir=' + self._gcc_dir]
+    build_gcc_args = [
+        build_tc.__file__, '--board=' + self._board,
+        '--chromeos_root=' + self._chromeos_root, '--gcc_dir=' + self._gcc_dir
+    ]
     ret = build_tc.Main(build_gcc_args)
     if ret:
       raise RuntimeError('Building gcc failed.')
 
   def CheckGCC(self):
-    args = [run_dejagnu.__file__, '--board=' + self._board,
-            '--chromeos_root=' + self._chromeos_root,
-            '--mount=' + self._gcc_dir, '--remote=' + self._remote]
+    args = [
+        run_dejagnu.__file__, '--board=' + self._board,
+        '--chromeos_root=' + self._chromeos_root, '--mount=' + self._gcc_dir,
+        '--remote=' + self._remote
+    ]
     if self._cleanup:
       args.append('--cleanup=' + self._cleanup)
     if self._runtestflags:
@@ -102,9 +106,9 @@
     print(l)
     if not start_counting and 'Build results not in the manifest' in l:
       start_counting = True
-    elif start_counting and l and (
-        l.find('UNRESOLVED:') == 0 or l.find('FAIL:') == 0 or
-        l.find('XFAIL:') == 0 or l.find('XPASS:') == 0):
+    elif start_counting and l and (l.find('UNRESOLVED:') == 0 or
+                                   l.find('FAIL:') == 0 or l.find('XFAIL:') == 0
+                                   or l.find('XPASS:') == 0):
       n_failures = n_failures + 1
   if not start_counting:
     return -1
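
A minimal sketch of the failure-counting rule above, using hypothetical DejaGnu log lines (counting starts only after the 'Build results not in the manifest' marker, and only lines beginning with UNRESOLVED:, FAIL:, XFAIL: or XPASS: are counted):

log_lines = [
    'Running target unix',                # before the marker: ignored
    'Build results not in the manifest',  # marker: start counting after this
    'FAIL: gcc.dg/foo.c execution test',  # counted
    'XPASS: gcc.dg/bar.c',                # counted
    'PASS: gcc.dg/baz.c',                 # PASS lines are never counted
]
n_failures = 0
start_counting = False
for l in log_lines:
  if not start_counting and 'Build results not in the manifest' in l:
    start_counting = True
  elif start_counting and l and (l.find('UNRESOLVED:') == 0 or
                                 l.find('FAIL:') == 0 or
                                 l.find('XFAIL:') == 0 or
                                 l.find('XPASS:') == 0):
    n_failures = n_failures + 1
print(n_failures)  # 2
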
@@ -146,8 +150,7 @@
     # email exception? Just log it on console.
     print('Sending email failed - {0}'
           'Subject: {1}'
-          'Text: {2}').format(
-              str(e), subject, email_text)
+          'Text: {2}').format(str(e), subject, email_text)
 
 
 def ProcessArguments(argv):
@@ -156,35 +159,41 @@
       description=('This script is used by nightly client to test gcc. '
                    'DO NOT run it unless you know what you are doing.'),
       usage='test_gcc_dejagnu.py options')
-  parser.add_argument('-b',
-                      '--board',
-                      dest='board',
-                      help=('Required. Specify board type. For example '
-                            '\'lumpy\' and \'daisy\''))
-  parser.add_argument('-r',
-                      '--remote',
-                      dest='remote',
-                      help=('Required. Specify remote board address'))
-  parser.add_argument('-g',
-                      '--gcc_dir',
-                      dest='gcc_dir',
-                      default='gcc.live',
-                      help=('Optional. Specify gcc checkout directory.'))
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      default='chromeos.live',
-                      help=('Optional. Specify chromeos checkout directory.'))
-  parser.add_argument('--cleanup',
-                      dest='cleanup',
-                      default=None,
-                      help=('Optional. Do cleanup after the test.'))
-  parser.add_argument('--runtestflags',
-                      dest='runtestflags',
-                      default=None,
-                      help=('Optional. Options to RUNTESTFLAGS env var '
-                            'while invoking make check. '
-                            '(Mainly used for testing purpose.)'))
+  parser.add_argument(
+      '-b',
+      '--board',
+      dest='board',
+      help=('Required. Specify board type. For example '
+            '\'lumpy\' and \'daisy\''))
+  parser.add_argument(
+      '-r',
+      '--remote',
+      dest='remote',
+      help=('Required. Specify remote board address'))
+  parser.add_argument(
+      '-g',
+      '--gcc_dir',
+      dest='gcc_dir',
+      default='gcc.live',
+      help=('Optional. Specify gcc checkout directory.'))
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      default='chromeos.live',
+      help=('Optional. Specify chromeos checkout directory.'))
+  parser.add_argument(
+      '--cleanup',
+      dest='cleanup',
+      default=None,
+      help=('Optional. Do cleanup after the test.'))
+  parser.add_argument(
+      '--runtestflags',
+      dest='runtestflags',
+      default=None,
+      help=('Optional. Options to RUNTESTFLAGS env var '
+            'while invoking make check. '
+            '(Mainly used for testing purpose.)'))
 
   options = parser.parse_args(argv[1:])
 
diff --git a/test_gdb_dejagnu.py b/test_gdb_dejagnu.py
index 4f44527..c2a4ba9 100755
--- a/test_gdb_dejagnu.py
+++ b/test_gdb_dejagnu.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 """Script adapter used by automation client for testing dejagnu.
 
    This is not intended to be run on command line.
@@ -28,8 +28,10 @@
     self._cmd_exec = command_executer.GetCommandExecuter()
 
   def SetupChromeOS(self):
-    cmd = [setup_chromeos.__file__, '--dir=' + self._chromeos_root,
-           '--minilayout', '--jobs=8']
+    cmd = [
+        setup_chromeos.__file__, '--dir=' + self._chromeos_root, '--minilayout',
+        '--jobs=8'
+    ]
     ret = setup_chromeos.Main(cmd)
     if ret:
       raise RuntimeError('Failed to checkout chromeos')
@@ -41,16 +43,17 @@
 
   def SetupBoard(self):
     cmd = './setup_board --board=' + self._board
-    ret = self._cmd_exec.ChrootRunCommand(self._chromeos_root,
-                                          cmd,
-                                          terminated_timeout=4000)
+    ret = self._cmd_exec.ChrootRunCommand(
+        self._chromeos_root, cmd, terminated_timeout=4000)
     if ret:
       raise RuntimeError('Failed to setup board.')
 
   def CheckGDB(self):
-    args = [gdb_dejagnu.__file__, '--board=' + self._board,
-            '--chromeos_root=' + self._chromeos_root,
-            '--mount=' + self._gdb_dir, '--remote=' + self._remote]
+    args = [
+        gdb_dejagnu.__file__, '--board=' + self._board,
+        '--chromeos_root=' + self._chromeos_root, '--mount=' + self._gdb_dir,
+        '--remote=' + self._remote
+    ]
     if self._cleanup:
       args.append('--cleanup=' + self._cleanup)
     return gdb_dejagnu.Main(args)
@@ -98,8 +101,7 @@
     # email exception? Just log it on console.
     print('Sending email failed - {0}'
           'Subject: {1}'
-          'Text: {2}').format(
-              str(e), subject, email_text)
+          'Text: {2}').format(str(e), subject, email_text)
 
 
 def ProcessArguments(argv):
@@ -108,29 +110,34 @@
       description=('This script is used by nightly client to test gdb. '
                    'DO NOT run it unless you know what you are doing.'),
       usage='test_gdb_dejagnu.py options')
-  parser.add_argument('-b',
-                      '--board',
-                      dest='board',
-                      help=('Required. Specify board type. For example '
-                            '\'lumpy\' and \'daisy\''))
-  parser.add_argument('-r',
-                      '--remote',
-                      dest='remote',
-                      help=('Required. Specify remote board address'))
-  parser.add_argument('-g',
-                      '--gdb_dir',
-                      dest='gdb_dir',
-                      default='',
-                      help=('Optional. Specify gdb checkout directory.'))
-  parser.add_argument('-c',
-                      '--chromeos_root',
-                      dest='chromeos_root',
-                      default='chromeos.live',
-                      help=('Optional. Specify chromeos checkout directory.'))
-  parser.add_argument('--cleanup',
-                      dest='cleanup',
-                      default=None,
-                      help=('Optional. Do cleanup after the test.'))
+  parser.add_argument(
+      '-b',
+      '--board',
+      dest='board',
+      help=('Required. Specify board type. For example '
+            '\'lumpy\' and \'daisy\''))
+  parser.add_argument(
+      '-r',
+      '--remote',
+      dest='remote',
+      help=('Required. Specify remote board address'))
+  parser.add_argument(
+      '-g',
+      '--gdb_dir',
+      dest='gdb_dir',
+      default='',
+      help=('Optional. Specify gdb checkout directory.'))
+  parser.add_argument(
+      '-c',
+      '--chromeos_root',
+      dest='chromeos_root',
+      default='chromeos.live',
+      help=('Optional. Specify chromeos checkout directory.'))
+  parser.add_argument(
+      '--cleanup',
+      dest='cleanup',
+      default=None,
+      help=('Optional. Do cleanup after the test.'))
 
   options = parser.parse_args(argv)
 
diff --git a/test_toolchains.py b/test_toolchains.py
index ecae6f4..8684653 100755
--- a/test_toolchains.py
+++ b/test_toolchains.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 
 # Script to test different toolchains against ChromeOS benchmarks.
 """Toolchain team nightly performance test script (local builds)."""
@@ -11,7 +11,6 @@
 import sys
 import build_chromeos
 import setup_chromeos
-import time
 from cros_utils import command_executer
 from cros_utils import misc
 from cros_utils import logger
diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py
index 9ee7d8b..943dc26 100755
--- a/update_telemetry_defaults.py
+++ b/update_telemetry_defaults.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright 2013 Google Inc. All Rights Reserved.
 """Script to maintain the Telemetry benchmark default results file.
@@ -6,7 +6,6 @@
 This script allows the user to see and update the set of default
 results to be used in generating reports from running the Telemetry
 benchmarks.
-
 """
 
 from __future__ import print_function
@@ -84,8 +83,8 @@
         print("Updated results set for '%s': " % benchmark)
         print('%s : %s' % (benchmark, repr(self._defaults[benchmark])))
       else:
-        print("'%s' is not in '%s's default results list." %
-              (result, benchmark))
+        print("'%s' is not in '%s's default results list." % (result,
+                                                              benchmark))
     else:
       print("Cannot find benchmark named '%s'" % benchmark)
 
diff --git a/user_activity_benchmarks/benchmark_metrics.py b/user_activity_benchmarks/benchmark_metrics.py
deleted file mode 100644
index 30ae31e..0000000
--- a/user_activity_benchmarks/benchmark_metrics.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Computes the metrics for functions, Chrome OS components and benchmarks."""
-
-from collections import defaultdict
-
-
-def ComputeDistanceForFunction(child_functions_statistics_sample,
-                               child_functions_statistics_reference):
-  """Computes the distance metric for a function.
-
-  Args:
-    child_functions_statistics_sample: A dict that has as a key the name of a
-      function and as a value the inclusive count fraction. The keys are
-      the child functions of a sample parent function.
-    child_functions_statistics_reference: A dict that has as a key the name of
-      a function and as a value the inclusive count fraction. The keys are
-      the child functions of a reference parent function.
-
-  Returns:
-    A float value representing the sum of inclusive count fraction
-    differences of pairs of common child functions. If a child function is
-    present in a single data set, then we consider the missing inclusive
-    count fraction as 0. This value describes the difference in behaviour
-    between a sample and the reference parent function.
-  """
-  # We initialize the distance with a small value to avoid the further
-  # division by zero.
-  distance = 1.0
-
-  for child_function, inclusive_count_fraction_reference in \
-      child_functions_statistics_reference.iteritems():
-    inclusive_count_fraction_sample = 0.0
-
-    if child_function in child_functions_statistics_sample:
-      inclusive_count_fraction_sample = \
-          child_functions_statistics_sample[child_function]
-    distance += \
-        abs(inclusive_count_fraction_sample -
-            inclusive_count_fraction_reference)
-
-  for child_function, inclusive_count_fraction_sample in \
-      child_functions_statistics_sample.iteritems():
-    if child_function not in child_functions_statistics_reference:
-      distance += inclusive_count_fraction_sample
-
-  return distance
-
-
-def ComputeScoreForFunction(distance, reference_fraction, sample_fraction):
-  """Computes the score for a function.
-
-  Args:
-    distance: A float value representing the difference in behaviour between
-      the sample and the reference function.
-    reference_fraction: A float value representing the inclusive count
-      fraction of the reference function.
-    sample_fraction: A float value representing the inclusive count
-      fraction of the sample function.
-
-  Returns:
-    A float value representing the score of the function.
-  """
-  return reference_fraction * sample_fraction / distance
-
-
-def ComputeMetricsForComponents(cwp_function_groups, function_metrics):
-  """Computes the metrics for a set of Chrome OS components.
-
-  For every Chrome OS group, we compute the number of functions matching the
-  group, the cumulative and average score, the cumulative and average distance
-  of all those functions. A function matches a group if the path of the file
-  containing its definition contains the common path describing the group.
-
-  Args:
-    cwp_function_groups: A dict having as a key the name of the group and as a
-      value a common path describing the group.
-    function_metrics: A dict having as a key the name of the function and the
-      name of the file where it is declared concatenated by a ',', and as a
-      value a tuple containing the distance and the score metrics.
-
-  Returns:
-    A dict containing as a key the name of the group and as a value a tuple
-    with the group file path, the number of functions matching the group,
-    the cumulative and average score, cumulative and average distance of all
-    those functions.
-  """
-  function_groups_metrics = defaultdict(lambda: (0, 0.0, 0.0, 0.0, 0.0))
-
-  for function_key, metric in function_metrics.iteritems():
-    _, function_file = function_key.split(',')
-
-    for group, common_path in cwp_function_groups:
-      if common_path not in function_file:
-        continue
-
-      function_distance = metric[0]
-      function_score = metric[1]
-      group_statistic = function_groups_metrics[group]
-
-      function_count = group_statistic[1] + 1
-      function_distance_cum = function_distance + group_statistic[2]
-      function_distance_avg = function_distance_cum / float(function_count)
-      function_score_cum = function_score + group_statistic[4]
-      function_score_avg = function_score_cum / float(function_count)
-
-      function_groups_metrics[group] = \
-          (common_path,
-           function_count,
-           function_distance_cum,
-           function_distance_avg,
-           function_score_cum,
-           function_score_avg)
-      break
-
-  return function_groups_metrics
-
-
-def ComputeMetricsForBenchmark(function_metrics):
-  function_count = len(function_metrics.keys())
-  distance_cum = 0.0
-  distance_avg = 0.0
-  score_cum = 0.0
-  score_avg = 0.0
-
-  for distance, score in function_metrics.values():
-    distance_cum += distance
-    score_cum += score
-
-  distance_avg = distance_cum / float(function_count)
-  score_avg = score_cum / float(function_count)
-  return function_count, distance_cum, distance_avg, score_cum, score_avg
-
-
-def ComputeFunctionCountForBenchmarkSet(set_function_metrics, cwp_functions,
-                                        metric_string):
-  """Computes the function count metric pair for the benchmark set.
-
-     For the function count metric, we count the unique functions covered by the
-     set of benchmarks. We compute the fraction of unique functions out
-     of the amount of CWP functions given.
-
-     We compute also the same metric pair for every group from the keys of the
-     set_function_metrics dict.
-
-  Args:
-    set_function_metrics: A list of dicts having as a key the name of a group
-      and as value a list of functions that match the given group.
-    cwp_functions: A dict having as a key the name of the groups and as a value
-      the list of CWP functions that match an individual group.
-    metric_string: A tuple of strings that will be mapped to the tuple of metric
-      values in the returned function group dict. This is done for convenience
-      for the JSON output.
-
-  Returns:
-    A tuple with the metric pair and a dict with the group names and values
-    of the metric pair. The first value of the metric pair represents the
-    function count and the second value the function count fraction.
-    The dict has as a key the name of the group and as a value a dict that
-    maps the metric_string  to the values of the metric pair of the group.
-  """
-  cwp_functions_count = sum(len(functions)
-                            for functions in cwp_functions.itervalues())
-  set_groups_functions = defaultdict(set)
-  for benchmark_function_metrics in set_function_metrics:
-    for group_name in benchmark_function_metrics:
-      set_groups_functions[group_name] |= \
-          set(benchmark_function_metrics[group_name])
-
-  set_groups_functions_count = {}
-  set_functions_count = 0
-  for group_name, functions \
-      in set_groups_functions.iteritems():
-    set_group_functions_count = len(functions)
-    if group_name in cwp_functions:
-      set_groups_functions_count[group_name] = {
-          metric_string[0]: set_group_functions_count,
-          metric_string[1]:
-          set_group_functions_count / float(len(cwp_functions[group_name]))}
-    else:
-      set_groups_functions_count[group_name] = \
-          {metric_string[0]: set_group_functions_count, metric_string[1]: 0.0}
-    set_functions_count += set_group_functions_count
-
-  set_functions_count_fraction = \
-      set_functions_count / float(cwp_functions_count)
-  return (set_functions_count, set_functions_count_fraction), \
-      set_groups_functions_count
-
-
-def ComputeDistanceForBenchmarkSet(set_function_metrics, cwp_functions,
-                                   metric_string):
-  """Computes the distance variation metric pair for the benchmark set.
-
-     For the distance variation metric, we compute the sum of the distance
-     variations of the functions covered by a set of benchmarks.
-     We define the distance variation as the difference between the distance
-     value of a functions and the ideal distance value (1.0).
-     If a function appears in multiple common functions files, we consider
-     only the minimum value. We compute also the distance variation per
-     function.
-
-     In addition, we compute also the same metric pair for every group from
-     the keys of the set_function_metrics dict.
-
-  Args:
-    set_function_metrics: A list of dicts having as a key the name of a group
-      and as value a list of functions that match the given group.
-    cwp_functions: A dict having as a key the name of the groups and as a value
-      the list of CWP functions that match an individual group.
-    metric_string: A tuple of strings that will be mapped to the tuple of metric
-      values in the returned function group dict. This is done for convenience
-      for the JSON output.
-
-  Returns:
-    A tuple with the metric pair and a dict with the group names and values
-    of the metric pair. The first value of the metric pair represents the
-    distance variation per function and the second value the distance variation.
-    The dict has as a key the name of the group and as a value a dict that
-    maps the metric_string to the values of the metric pair of the group.
-  """
-  set_unique_functions = defaultdict(lambda: defaultdict(lambda: float('inf')))
-  set_function_count = 0
-  total_distance_variation = 0.0
-  for benchmark_function_metrics in set_function_metrics:
-    for group_name in benchmark_function_metrics:
-      for function_key, metrics in \
-          benchmark_function_metrics[group_name].iteritems():
-        previous_distance = \
-            set_unique_functions[group_name][function_key]
-        min_distance = min(metrics[0], previous_distance)
-        set_unique_functions[group_name][function_key] = min_distance
-  groups_distance_variations = defaultdict(lambda: (0.0, 0.0))
-  for group_name, functions_distances in set_unique_functions.iteritems():
-    group_function_count = len(functions_distances)
-    group_distance_variation = \
-        sum(functions_distances.itervalues()) - float(group_function_count)
-    total_distance_variation += group_distance_variation
-    set_function_count += group_function_count
-    groups_distance_variations[group_name] = \
-        {metric_string[0]:
-         group_distance_variation / float(group_function_count),
-         metric_string[1]: group_distance_variation}
-
-  return (total_distance_variation / set_function_count,
-          total_distance_variation), groups_distance_variations
-
-
-def ComputeScoreForBenchmarkSet(set_function_metrics, cwp_functions,
-                                metric_string):
-  """Computes the function count metric pair for the benchmark set.
-
-     For the score metric, we compute the sum of the scores of the functions
-     from a set of benchmarks. If a function appears in multiple common
-     functions files, we consider only the maximum value. We compute also the
-     fraction of this sum from the sum of all the scores of the functions from
-     the CWP data covering the given groups, in the ideal case (the ideal
-     score of a function is 1.0).
-
-     In addition, we compute the same metric pair for every group from the
-     keys of the set_function_metrics dict.
-
-  Args:
-    set_function_metrics: A list of dicts having as a key the name of a group
-      and as value a list of functions that match the given group.
-    cwp_functions: A dict having as a key the name of the groups and as a value
-      the list of CWP functions that match an individual group.
-    metric_string: A tuple of strings that will be mapped to the tuple of metric
-      values in the returned function group dict. This is done for convenience
-      for the JSON output.
-
-  Returns:
-    A tuple with the metric pair and a dict with the group names and values
-    of the metric pair. The first value of the pair is the fraction of the sum
-    of the scores from the ideal case and the second value represents the
-    sum of scores of the functions. The dict has as a key the name of the group
-    and as a value a dict that maps the metric_string to the values of the
-    metric pair of the group.
-  """
-  cwp_functions_count = sum(len(functions)
-                            for functions in cwp_functions.itervalues())
-  set_unique_functions = defaultdict(lambda: defaultdict(lambda: 0.0))
-  total_score = 0.0
-
-  for benchmark_function_metrics in set_function_metrics:
-    for group_name in benchmark_function_metrics:
-      for function_key, metrics in \
-          benchmark_function_metrics[group_name].iteritems():
-        previous_score = \
-            set_unique_functions[group_name][function_key]
-        max_score = max(metrics[1], previous_score)
-        set_unique_functions[group_name][function_key] = max_score
-
-  groups_scores = defaultdict(lambda: (0.0, 0.0))
-
-  for group_name, function_scores in set_unique_functions.iteritems():
-    group_function_count = float(len(cwp_functions[group_name]))
-    group_score = sum(function_scores.itervalues())
-    total_score += group_score
-    groups_scores[group_name] = {
-        metric_string[0]: group_score / group_function_count,
-        metric_string[1]: group_score
-    }
-
-  return (total_score / cwp_functions_count, total_score), groups_scores
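
For reference, the two core formulas documented in the deleted module above (the distance between a sample and a reference function, and the per-function score) reduce to the following minimal sketch; the child-fraction dictionaries and the 0.3/0.4 inclusive fractions are hypothetical values:

def ComputeDistance(child_fracs_sample, child_fracs_reference):
  # Start at 1.0 so the division in the score below can never hit zero.
  distance = 1.0
  for child, ref_frac in child_fracs_reference.items():
    distance += abs(child_fracs_sample.get(child, 0.0) - ref_frac)
  for child, sample_frac in child_fracs_sample.items():
    if child not in child_fracs_reference:
      # A child present only in the sample counts with its full fraction.
      distance += sample_frac
  return distance

def ComputeScore(distance, reference_fraction, sample_fraction):
  return reference_fraction * sample_fraction / distance

d = ComputeDistance({'f,file_f': 0.1, 'g,file_g': 0.2}, {'f,file_f': 0.4})
print(d)                          # 1.0 + |0.1 - 0.4| + 0.2, i.e. ~1.5
print(ComputeScore(d, 0.3, 0.4))  # 0.3 * 0.4 / distance, i.e. ~0.08

The group-level metrics in ComputeMetricsForComponents then accumulate these per-function values (function count, cumulative and average distance, cumulative and average score) for every function whose file path contains the group's common path.
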
diff --git a/user_activity_benchmarks/benchmark_metrics_experiment.py b/user_activity_benchmarks/benchmark_metrics_experiment.py
deleted file mode 100755
index e8152e7..0000000
--- a/user_activity_benchmarks/benchmark_metrics_experiment.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/python2
-#
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Runs an experiment with the benchmark metrics on a pair of CWP data sets.
-
-A data set should contain the files with the pairwise inclusive and the
-inclusive statistics. The pairwise inclusive file contains pairs of
-parent and child functions with their inclusive count fractions out of the
-total amount of inclusive count values and the files of the child functions.
-The inclusive file contains the functions with their inclusive count fraction
-out of the total amount of inclusive count values and the file name of the
-function. The input data should be collected using the scripts
-collect_experiment_data.sh or collect_experiment_data_odd_even_session.sh
-
-For every function, this script computes the distance and the score values.
-The output is stored in the file cwp_functions_statistics_file.
-
-For every Chrome OS component, this script computes a set of metrics consisting
-in the number of functions, the average and cumulative distance and score of
-the functions matching the group. The output is stored in the file
-cwp_function_groups_statistics_file.
-"""
-
-import argparse
-import sys
-
-import benchmark_metrics
-import utils
-
-
-class MetricsExperiment(object):
-  """Runs an experiment with the benchmark metrics on a pair of data sets."""
-
-  def __init__(self, cwp_pairwise_inclusive_reference,
-               cwp_pairwise_inclusive_test, cwp_inclusive_reference,
-               cwp_inclusive_test, cwp_function_groups_file,
-               cwp_function_groups_statistics_file,
-               cwp_function_statistics_file):
-    """Initializes the MetricsExperiment class.
-
-    Args:
-      cwp_pairwise_inclusive_reference: The CSV file containing the pairwise
-        inclusive values from the reference data set.
-      cwp_pairwise_inclusive_test: The CSV file containing the pairwise
-        inclusive values from the test data set.
-      cwp_inclusive_reference: The CSV file containing the inclusive values
-        from the reference data set.
-      cwp_inclusive_test: The CSV file containing the inclusive values from
-        the test data set.
-      cwp_function_groups_file: The CSV file containing the groups of functions.
-      cwp_function_groups_statistics_file: The output CSV file that will
-        contain the metrics for the function groups.
-      cwp_function_statistics_file: The output CSV file that will contain the
-        metrics for the CWP functions.
-    """
-    self._cwp_pairwise_inclusive_reference = cwp_pairwise_inclusive_reference
-    self._cwp_pairwise_inclusive_test = cwp_pairwise_inclusive_test
-    self._cwp_inclusive_reference = cwp_inclusive_reference
-    self._cwp_inclusive_test = cwp_inclusive_test
-    self._cwp_function_groups_file = cwp_function_groups_file
-    self._cwp_function_groups_statistics_file = \
-        cwp_function_groups_statistics_file
-    self._cwp_function_statistics_file = cwp_function_statistics_file
-
-  def PerformComputation(self):
-    """Does the benchmark metrics experimental computation.
-
-    For every function, it is computed a distance based on the sum of the
-    differences of the fractions spent in the child functions. Afterwards,
-    it is computed a score based on the inclusive values fractions and the
-    distance value. The statistics for all the function are written in the file
-    self._cwp_function_statistics_file.
-
-    The functions are grouped on Chrome OS components based on the path of the
-    file where a function is defined. For every group, there are computed the
-    total number of functions matching that group, the cumulative distance, the
-    average distance and the cumulative score of the functions.
-    """
-
-    inclusive_statistics_reference = \
-        utils.ParseCWPInclusiveCountFile(self._cwp_inclusive_reference)
-    inclusive_statistics_cum_reference = \
-        utils.ComputeCWPCummulativeInclusiveStatistics(
-            inclusive_statistics_reference)
-    inclusive_statistics_test = \
-        utils.ParseCWPInclusiveCountFile(self._cwp_inclusive_test)
-    inclusive_statistics_cum_test = \
-        utils.ComputeCWPCummulativeInclusiveStatistics(
-            inclusive_statistics_test)
-    pairwise_inclusive_statistics_reference = \
-        utils.ParseCWPPairwiseInclusiveCountFile(
-            self._cwp_pairwise_inclusive_reference)
-    pairwise_inclusive_fractions_reference = \
-        utils.ComputeCWPChildFunctionsFractions(
-            inclusive_statistics_cum_reference,
-            pairwise_inclusive_statistics_reference)
-    pairwise_inclusive_statistics_test = \
-        utils.ParseCWPPairwiseInclusiveCountFile(
-            self._cwp_pairwise_inclusive_test)
-    pairwise_inclusive_fractions_test = \
-        utils.ComputeCWPChildFunctionsFractions(
-            inclusive_statistics_cum_test,
-            pairwise_inclusive_statistics_test)
-    parent_function_statistics = {}
-
-    with open(self._cwp_function_groups_file) as input_file:
-      cwp_function_groups = utils.ParseFunctionGroups(input_file.readlines())
-
-    for parent_function_key, parent_function_statistics_test \
-        in inclusive_statistics_test.iteritems():
-      parent_function_name, _ = parent_function_key.split(',')
-      parent_function_fraction_test = parent_function_statistics_test[2]
-
-      parent_function_fraction_reference = \
-          inclusive_statistics_reference[parent_function_key][2]
-
-      child_functions_fractions_test = \
-          pairwise_inclusive_fractions_test.get(parent_function_name, {})
-
-      child_functions_fractions_reference = \
-          pairwise_inclusive_fractions_reference.get(parent_function_name, {})
-
-      distance = benchmark_metrics.ComputeDistanceForFunction(
-          child_functions_fractions_test, child_functions_fractions_reference)
-
-      parent_function_score_test = benchmark_metrics.ComputeScoreForFunction(
-          distance, parent_function_fraction_test,
-          parent_function_fraction_reference)
-
-      parent_function_statistics[parent_function_key] = \
-          (distance, parent_function_score_test)
-
-    with open(self._cwp_function_statistics_file, 'w') as output_file:
-      statistics_lines = ['function,file,distance,score']
-      statistics_lines += \
-          [','.join([parent_function_key.replace(';;', ','),
-                     str(statistic[0]),
-                     str(statistic[1])])
-           for parent_function_key, statistic
-           in parent_function_statistics.iteritems()]
-      output_file.write('\n'.join(statistics_lines))
-
-    cwp_groups_statistics_test = benchmark_metrics.ComputeMetricsForComponents(
-        cwp_function_groups, parent_function_statistics)
-
-    with open(self._cwp_function_groups_statistics_file, 'w') as output_file:
-      group_statistics_lines = \
-          ['group,file_path,function_count,distance_cum,distance_avg,score_cum,'
-           'score_avg']
-      group_statistics_lines += \
-          [','.join([group_name,
-                     str(statistic[0]),
-                     str(statistic[1]),
-                     str(statistic[2]),
-                     str(statistic[3]),
-                     str(statistic[4]),
-                     str(statistic[5])])
-           for group_name, statistic
-           in cwp_groups_statistics_test.iteritems()]
-      output_file.write('\n'.join(group_statistics_lines))
-
-
-def ParseArguments(arguments):
-  parser = argparse.ArgumentParser(
-      description='Runs an experiment with the benchmark metrics on a pair of '
-      'CWP data sets.')
-  parser.add_argument(
-      '--cwp_pairwise_inclusive_reference',
-      required=True,
-      help='The reference CSV file that will contain a pair of parent and '
-      'child functions with their inclusive count fractions out of the total '
-      'amount of inclusive count values.')
-  parser.add_argument(
-      '--cwp_pairwise_inclusive_test',
-      required=True,
-      help='The test CSV file that will contain a pair of parent and '
-      'child functions with their inclusive count fractions out of the total '
-      'amount of inclusive count values.')
-  parser.add_argument(
-      '--cwp_inclusive_reference',
-      required=True,
-      help='The reference CSV file that will contain a function with its '
-      'inclusive count fraction out of the total amount of inclusive count '
-      'values.')
-  parser.add_argument(
-      '--cwp_inclusive_test',
-      required=True,
-      help='The test CSV file that will contain a function with its '
-      'inclusive count fraction out of the total amount of inclusive count '
-      'values.')
-  parser.add_argument(
-      '-g',
-      '--cwp_function_groups_file',
-      required=True,
-      help='The file that will contain the CWP function groups.'
-      'A line consists in the group name and a file path. A group must '
-      'represent a ChromeOS component.')
-  parser.add_argument(
-      '-s',
-      '--cwp_function_groups_statistics_file',
-      required=True,
-      help='The output file that will contain the metric statistics for the '
-      'CWP function groups in CSV format. A line consists in the group name, '
-      'file path, number of functions matching the group, the total score '
-      'and distance values.')
-  parser.add_argument(
-      '-f',
-      '--cwp_function_statistics_file',
-      required=True,
-      help='The output file that will contain the metric statistics for the '
-      'CWP functions in CSV format. A line consists in the function name, file '
-      'name, cummulative distance, average distance, cummulative score and '
-      'average score values.')
-
-  options = parser.parse_args(arguments)
-  return options
-
-
-def Main(argv):
-  options = ParseArguments(argv)
-  metrics_experiment = MetricsExperiment(
-      options.cwp_pairwise_inclusive_reference,
-      options.cwp_pairwise_inclusive_test, options.cwp_inclusive_reference,
-      options.cwp_inclusive_test, options.cwp_function_groups_file,
-      options.cwp_function_groups_statistics_file,
-      options.cwp_function_statistics_file)
-  metrics_experiment.PerformComputation()
-
-
-if __name__ == '__main__':
-  Main(sys.argv[1:])
diff --git a/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py b/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py
deleted file mode 100755
index c4755ef..0000000
--- a/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the benchmark_metrics_experiment module."""
-
-import os
-import tempfile
-import unittest
-
-from benchmark_metrics_experiment import MetricsExperiment
-
-
-class MetricsExperimentTest(unittest.TestCase):
-  """Test class for MetricsExperiment class."""
-
-  def __init__(self, *args, **kwargs):
-    super(MetricsExperimentTest, self).__init__(*args, **kwargs)
-    self._pairwise_inclusive_count_test_file = \
-        'testdata/input/pairwise_inclusive_count_test.csv'
-    self._pairwise_inclusive_count_reference_file = \
-        'testdata/input/pairwise_inclusive_count_reference.csv'
-    self._inclusive_count_test_file = \
-        'testdata/input/inclusive_count_test.csv'
-    self._inclusive_count_reference_file = \
-        'testdata/input/inclusive_count_reference.csv'
-    self._cwp_function_groups_file = \
-        'testdata/input/cwp_function_groups.txt'
-
-  def _CheckFileContents(self, file_name, expected_content_lines):
-    with open(file_name) as input_file:
-      result_content_lines = input_file.readlines()
-      self.assertListEqual(expected_content_lines, result_content_lines)
-
-  def testExperiment(self):
-    group_statistics_file, group_statistics_filename = tempfile.mkstemp()
-
-    os.close(group_statistics_file)
-
-    function_statistics_file, function_statistics_filename = tempfile.mkstemp()
-
-    os.close(function_statistics_file)
-
-
-    expected_group_statistics_lines = \
-        ['group,file_path,function_count,distance_cum,distance_avg,score_cum,'
-         'score_avg\n',
-         'ab,/a/b,2.0,3.01,1.505,8.26344228895,4.13172114448\n',
-         'e,/e,2.0,2.0,1.0,27.5,13.75\n',
-         'cd,/c/d,2.0,2.0,1.0,27.5,13.75']
-    expected_function_statistics_lines = \
-        ['function,file,distance,score\n',
-         'func_i,/c/d/file_i,1.0,17.6\n',
-         'func_j,/e/file_j,1.0,27.5\n',
-         'func_f,/a/b/file_f,1.59,1.4465408805\n',
-         'func_h,/c/d/file_h,1.0,9.9\n',
-         'func_k,/e/file_k,1.0,0.0\n',
-         'func_g,/a/b/file_g,1.42,6.81690140845']
-    metric_experiment = \
-        MetricsExperiment(self._pairwise_inclusive_count_reference_file,
-                          self._pairwise_inclusive_count_test_file,
-                          self._inclusive_count_reference_file,
-                          self._inclusive_count_test_file,
-                          self._cwp_function_groups_file,
-                          group_statistics_filename,
-                          function_statistics_filename)
-
-    metric_experiment.PerformComputation()
-    self._CheckFileContents(group_statistics_filename,
-                            expected_group_statistics_lines)
-    self._CheckFileContents(function_statistics_filename,
-                            expected_function_statistics_lines)
-    os.remove(group_statistics_filename)
-    os.remove(function_statistics_filename)
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/user_activity_benchmarks/benchmark_metrics_unittest.py b/user_activity_benchmarks/benchmark_metrics_unittest.py
deleted file mode 100755
index a48361f..0000000
--- a/user_activity_benchmarks/benchmark_metrics_unittest.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the benchmark_metrics module."""
-
-import mock
-import unittest
-import benchmark_metrics
-
-
-class MetricsComputationTest(unittest.TestCase):
-  """Test class for MetricsComputation class."""
-
-  def __init__(self, *args, **kwargs):
-    super(MetricsComputationTest, self).__init__(*args, **kwargs)
-
-  def testComputeDistanceForFunction(self):
-    child_functions_statistics_sample = {
-        'f,file_f': 0.1,
-        'g,file_g': 0.2,
-        'h,file_h': 0.3,
-        'i,file_i': 0.4
-    }
-    child_functions_statistics_reference = {
-        'f,file_f': 0.4,
-        'i,file_i': 0.4,
-        'h,file_h2': 0.2
-    }
-    distance = benchmark_metrics.ComputeDistanceForFunction(
-        child_functions_statistics_sample, child_functions_statistics_reference)
-    self.assertEqual(distance, 2.0)
-
-    distance = benchmark_metrics.ComputeDistanceForFunction({}, {})
-    self.assertEqual(distance, 1.0)
-
-    distance = benchmark_metrics.ComputeDistanceForFunction(
-        child_functions_statistics_sample, {})
-    self.assertEqual(distance, 2.0)
-
-    distance = benchmark_metrics.ComputeDistanceForFunction(
-        {}, child_functions_statistics_reference)
-    self.assertEqual(distance, 2.0)
-
-  def testComputeScoreForFunction(self):
-    score = benchmark_metrics.ComputeScoreForFunction(1.2, 0.3, 0.4)
-    self.assertEqual(score, 0.1)
-
-  def testComputeMetricsForComponents(self):
-    function_metrics = {
-        'func_f,/a/b/file_f': (1.0, 2.3),
-        'func_g,/a/b/file_g': (1.1, 1.5),
-        'func_h,/c/d/file_h': (2.0, 1.7),
-        'func_i,/c/d/file_i': (1.9, 1.8),
-        'func_j,/c/d/file_j': (1.8, 1.9),
-        'func_k,/e/file_k': (1.2, 2.1),
-        'func_l,/e/file_l': (1.3, 3.1)
-    }
-    cwp_function_groups = [('ab', '/a/b'), ('cd', '/c/d'), ('e', '/e')]
-    expected_metrics = {'ab': ('/a/b', 2.0, 2.1, 1.05, 3.8, 1.9),
-                        'e': ('/e', 2.0, 2.5, 1.25, 5.2, 2.6),
-                        'cd': ('/c/d', 3.0, 5.7, 1.9000000000000001, 5.4, 1.8)}
-    result_metrics = benchmark_metrics.ComputeMetricsForComponents(
-        cwp_function_groups, function_metrics)
-
-    self.assertDictEqual(expected_metrics, result_metrics)
-
-  def testComputeMetricsForBenchmark(self):
-    function_metrics = {'func_f': (1.0, 2.0),
-                        'func_g': (1.1, 2.1),
-                        'func_h': (1.2, 2.2),
-                        'func_i': (1.3, 2.3)}
-    expected_benchmark_metrics = \
-        (4, 4.6000000000000005, 1.1500000000000001, 8.6, 2.15)
-    result_benchmark_metrics = \
-        benchmark_metrics.ComputeMetricsForBenchmark(function_metrics)
-
-    self.assertEqual(expected_benchmark_metrics, result_benchmark_metrics)
-
-  def testComputeMetricsForBenchmarkSet(self):
-    """TODO(evelinad): Add unit test for ComputeMetricsForBenchmarkSet."""
-    pass
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/user_activity_benchmarks/collect_experiment_data.sh b/user_activity_benchmarks/collect_experiment_data.sh
deleted file mode 100755
index a76cec8..0000000
--- a/user_activity_benchmarks/collect_experiment_data.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Uses Dremel queries to collect the inclusive and pairwise inclusive
-# statistics.
-
-set -e
-
-if [ "$#" -ne 7 ]; then
-  echo "USAGE: collect_experiment_data.sh cwp_table board board_arch " \
-  "Chrome_version Chrome_OS_version inclusive_output_file " \
-  "pairwise_inclusive_output_file"
-  exit 1
-fi
-
-readonly TABLE=$1
-readonly INCLUSIVE_OUTPUT_FILE=$6
-readonly PAIRWISE_INCLUSIVE_OUTPUT_FILE=$7
-readonly PERIODIC_COLLECTION=1
-readonly WHERE_CLAUSE_SPECIFICATIONS="meta.cros.board = '$2' AND  \
-  meta.cros.cpu_architecture = '$3' AND  \
-  meta.cros.chrome_version LIKE '%$4%' AND  \
-  meta.cros.version = '$5' AND \
-  meta.cros.collection_info.trigger_event = $PERIODIC_COLLECTION AND \
-  session.total_count > 2000"
-
-# Collects the function, with its file, the object and inclusive count
-# fraction out of the total amount of inclusive count values.
-echo "
-SELECT
-  replace(frame.function_name, \", \", \"; \") AS function,
-  frame.filename AS file,
-  frame.load_module_path AS dso,
-  SUM(frame.inclusive_count) AS inclusive_count,
-  SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count_fraction
-FROM
-  $TABLE table,
-  table.frame frame
-CROSS JOIN (
-  SELECT
-    SUM(count) AS value
-  FROM
-    $TABLE
-  WHERE
-    $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
-  $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
-  function,
-  file,
-  dso
-HAVING
-  inclusive_count_fraction > 0.0
-ORDER BY
-  inclusive_count_fraction DESC;
-" | dremel  --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv \
-  > "$INCLUSIVE_OUTPUT_FILE"
-
-# Collects the pair of parent and child functions, with the file and object
-# where the child function is declared and the inclusive count fraction of the
-# pair out of the total amount of inclusive count values.
-echo "
-SELECT
-  CONCAT(replace(frame.parent_function_name, \", \", \"; \"), \";;\",
-    replace(frame.function_name, \", \", \"; \")) AS parent_child_functions,
-  frame.filename AS child_function_file,
-  frame.load_module_path AS child_function_dso,
-  SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count
-FROM
-  $TABLE table,
-  table.frame frame
-CROSS JOIN (
-  SELECT
-    SUM(count) AS value
-  FROM $TABLE
-  WHERE
-    $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
-  $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
-  parent_child_functions,
-  child_function_file,
-  child_function_dso
-HAVING
-  inclusive_count > 0.0
-ORDER BY
-  inclusive_count DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv > \
-  "$PAIRWISE_INCLUSIVE_OUTPUT_FILE"
diff --git a/user_activity_benchmarks/collect_experiment_data_odd_even_session.sh b/user_activity_benchmarks/collect_experiment_data_odd_even_session.sh
deleted file mode 100755
index 900e582..0000000
--- a/user_activity_benchmarks/collect_experiment_data_odd_even_session.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Uses Dremel queries to collect the inclusive and pairwise inclusive statistics
-# for odd/even profile collection session ids.
-# The data is collected for an odd or even collection session id.
-
-set -e
-
-if [ $# -lt 8 ]; then
-  echo "Usage: collect_experiment_data_odd_even_session.sh cwp_table board " \
-    "board_arch Chrome_version Chrome_OS_version odd_even " \
-    "inclusive_output_file pairwise_inclusive_output_file"
-  exit 1
-fi
-
-readonly TABLE=$1
-readonly INCLUSIVE_OUTPUT_FILE=$7
-readonly PAIRWISE_INCLUSIVE_OUTPUT_FILE=$8
-readonly PERIODIC_COLLECTION=1
-WHERE_CLAUSE_SPECIFICATIONS="meta.cros.board = '$2' AND \
-  meta.cros.cpu_architecture = '$3' AND \
-  meta.cros.chrome_version LIKE '%$4%' AND \
-  meta.cros.version = '$5' AND \
-  meta.cros.collection_info.trigger_event = $PERIODIC_COLLECTION AND \
-  MOD(session.id, 2) = $6 AND \
-  session.total_count > 2000"
-
-# Collects the function, with its file, the object and inclusive count
-# fraction out of the total amount of inclusive count values.
-echo "
-SELECT
-  replace(frame.function_name, \", \", \"; \") AS function,
-  frame.filename AS file,
-  frame.load_module_path AS dso,
-  SUM(frame.inclusive_count) AS inclusive_count,
-  SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count_fraction
-FROM
-  $TABLE table,
-  table.frame frame
-CROSS JOIN (
-  SELECT
-    SUM(count) AS value
-  FROM $TABLE
-  WHERE
-    $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
-    $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
-  function,
-  file,
-  dso
-HAVING
-  inclusive_count_fraction > 0.0
-ORDER BY
-  inclusive_count_fraction DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv > \
-  "$INCLUSIVE_OUTPUT_FILE"
-
-# Collects the pair of parent and child functions, with the file and object
-# where the child function is declared and the inclusive count fraction of the
-# pair out of the total amount of inclusive count values.
-echo "
-SELECT
-  CONCAT(replace(frame.parent_function_name, \", \", \"; \"), \";;\",
-    replace(frame.function_name, \", \", \"; \")) AS parent_child_functions,
-  frame.filename AS child_function_file,
-  frame.load_module_path AS child_function_dso,
-  SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count
-FROM
-  $TABLE table,
-  table.frame frame
-CROSS JOIN (
-  SELECT
-    SUM(count) AS value
-  FROM
-    $TABLE
-  WHERE
-    $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
-  $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
-  parent_child_functions,
-  child_function_file,
-  child_function_dso
-HAVING
-  inclusive_count > 0.0
-ORDER BY
-  inclusive_count DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv > \
-  "$PAIRWISE_INCLUSIVE_OUTPUT_FILE"
diff --git a/user_activity_benchmarks/collect_pprof_data.sh b/user_activity_benchmarks/collect_pprof_data.sh
deleted file mode 100755
index 5b89f18..0000000
--- a/user_activity_benchmarks/collect_pprof_data.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Collects the pprof tree and top outputs.
-# All the local_cwp symbolized profiles are taken from the
-# local_cwp_results_path.
-# The pprof top output is stored in the pprof_top_results_path and the pprof
-# tree output is stored in the pprof_tree_results_path.
-
-set -e
-
-if [ "$#" -ne 3 ]; then
-  echo "USAGE: collect_pprof_data.sh local_cwp_results_path " \
-    "pprof_top_results_path pprof_tree_results_path"
-  exit 1
-fi
-
-readonly LOCAL_CWP_RESULTS_PATH=$1
-readonly PPROF_TOP_RESULTS_PATH=$2
-readonly PPROF_TREE_RESULTS_PATH=$3
-readonly SYMBOLIZED_PROFILES=`ls $LOCAL_CWP_RESULTS_PATH`
-
-for symbolized_profile in "${SYMBOLIZED_PROFILES[@]}"
-do
-  pprof --top "$LOCAL_CWP_RESULTS_PATH/${symbolized_profile}" > \
-    "$PPROF_TOP_RESULTS_PATH/${symbolized_profile}.pprof"
-  if [ $? -ne 0 ]; then
-    echo "Failed to extract the pprof top output for the $symbolized_profile."
-    continue
-  fi
-
-  pprof --tree "$LOCAL_CWP_RESULTS_PATH/${symbolized_profile}" > \
-    "$PPROF_TREE_RESULTS_PATH/${symbolized_profile}.pprof"
-  if [ $? -ne 0 ]; then
-    echo "Failed to extract the pprof tree output for the " \
-      "$symbolized_profile."
-    continue
-  fi
-done
diff --git a/user_activity_benchmarks/collect_telemetry_profiles.sh b/user_activity_benchmarks/collect_telemetry_profiles.sh
deleted file mode 100755
index 0583adc..0000000
--- a/user_activity_benchmarks/collect_telemetry_profiles.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Runs the Telemetry benchmarks with AutoTest and collects their perf profiles.
-# Reads the benchmark names from the telemetry_benchmark_file. Each benchmark
-# should be placed on a separate line.
-# The profile results are placed in the results_path.
-
-set -e
-
-if [ "$#" -ne 5 ]; then
-  echo "USAGE: collect_telemetry_profiles.sh board chrome_root_path " \
-  "machine_ip results_path telemetry_benchmarks_file"
-  exit 1
-fi
-
-# CHROME_ROOT should contain the path to the Chrome source. It is used by
-# AutoTest.
-export CHROME_ROOT=$2
-
-readonly BOARD=$1
-readonly IP=$3
-readonly RESULTS_PATH=$4
-readonly TELEMETRY_BENCHMARKS_FILE=$5
-
-# The following Telemetry benchmarks failed for the R52-8350.68.0 Chrome OS
-# version: page_cycler_v2.top_10_mobile,
-# page_cycler_v2.basic_oopif, smoothness.tough_filters_cases,
-# page_cycler_v2.intl_hi_ru,
-# image_decoding.image_decoding_measurement, system_health.memory_mobile,
-# memory.top_7_stress, smoothness.tough_path_rendering_cases,
-# page_cycler_v2.tough_layout_cases,
-# memory.long_running_idle_gmail_background_tbmv2, smoothness.tough_webgl_cases,
-# smoothness.tough_canvas_cases, smoothness.tough_texture_upload_cases,
-# top_10_mobile_memory_ignition, startup.large_profile.cold.blank_page,
-# page_cycler_v2.intl_ar_fa_he, start_with_ext.cold.blank_page,
-# start_with_ext.warm.blank_page, page_cycler_v2.intl_ko_th_vi,
-# smoothness.scrolling_tough_ad_case, page_cycler_v2_site_isolation.basic_oopif,
-# smoothness.tough_scrolling_cases, startup.large_profile.warm.blank_page,
-# page_cycler_v2.intl_es_fr_pt-BR, page_cycler_v2.intl_ja_zh,
-# memory.long_running_idle_gmail_tbmv2, smoothness.scrolling_tough_ad_cases,
-# page_cycler_v2.typical_25, smoothness.tough_webgl_ad_cases,
-# smoothness.tough_image_decode_cases.
-#
-# However, we did not manage to collect the profiles only from the following
-# benchmarks: smoothness.tough_filters_cases,
-# smoothness.tough_path_rendering_cases, page_cycler_v2.tough_layout_cases,
-# smoothness.tough_webgl_cases, smoothness.tough_canvas_cases,
-# smoothness.tough_texture_upload_cases, smoothness.tough_scrolling_cases,
-# smoothness.tough_webgl_ad_cases, smoothness.tough_image_decode_cases.
-#
-# Use ./run_benchmark --browser=cros-chrome --remote=$IP list to get the list of
-# Telemetry benchmarks.
-readonly LATEST_PERF_PROFILE=/tmp/test_that_latest/results-1-telemetry_Crosperf/telemetry_Crosperf/profiling/perf.data
-
-while read benchmark
-do
-  # TODO(evelinad): We should add -F 4000000 to the profiler_args list
-  # because we need to use the same sampling period as the one used to
-  # collect the CWP user data (a period of 4M cycles for cycles.callgraph).
-  test_that --debug  --board=${BOARD} --args=" profiler=custom_perf \
-    profiler_args='record -g -a -e cycles,instructions' \
-    run_local=False test=$benchmark " $IP telemetry_Crosperf
-  if [ $? -ne 0 ]; then
-    echo "Failed to run the $benchmark telemetry benchmark with Autotest."
-    continue
-  fi
-  echo "Warning: Sampling period is too high. It should be set to 4M samples."
-
-  cp "$LATEST_PERF_PROFILE" "$RESULTS_PATH/${benchmark}.data"
-  if [ $? -ne 0 ]; then
-    echo "Failed to move the perf profile file from $LATEST_PERF_PROFILE to " \
-      "$PERF_DATA_RESULTS_PATH/${benchmark}.data for the $benchmark " \
-      "telemetry benchmark."
-    continue
-  fi
-
-  # The ssh connection should be configured for passwordless login. This step
-  # is needed because we might run out of disk space if we run multiple
-  # benchmarks.
-  ssh root@$IP "rm -rf /usr/local/profilers/*"
-  if [ $? -ne 0 ]; then
-    echo "Failed to remove the output files from /usr/local/profilers/ for " \
-      "the $benchmark telemetry benchmark."
-    continue
-  fi
-done < $TELEMETRY_BENCHMARKS_FILE
-
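A small sketch of how the test_that --args string used above could be assembled per benchmark, adding the -F 4000000 value suggested by the TODO in the script (illustrative only; build_test_that_args is a hypothetical helper, not part of this repository):

def build_test_that_args(benchmark, sampling_flag='-F 4000000'):
  # Builds the --args value for test_that, including the sampling flag from the TODO.
  profiler_args = 'record -g -a -e cycles,instructions %s' % sampling_flag
  return (" profiler=custom_perf profiler_args='%s' "
          'run_local=False test=%s ' % (profiler_args, benchmark))

print(build_test_that_args('page_cycler_v2.typical_25'))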
diff --git a/user_activity_benchmarks/cwp_hot_functions_groups.txt b/user_activity_benchmarks/cwp_hot_functions_groups.txt
deleted file mode 100644
index 3a1f893..0000000
--- a/user_activity_benchmarks/cwp_hot_functions_groups.txt
+++ /dev/null
@@ -1,314 +0,0 @@
-third_party_accessibility_audit home/chrome-bot/chrome_root/src/third_party/accessibility-audit
-third_party_accessibility_test_framework home/chrome-bot/chrome_root/src/third_party/accessibility_test_framework
-third_party_adobe home/chrome-bot/chrome_root/src/third_party/adobe
-third_party_afl home/chrome-bot/chrome_root/src/third_party/afl
-third_party_analytics home/chrome-bot/chrome_root/src/third_party/analytics
-third_party_android_async_task home/chrome-bot/chrome_root/src/third_party/android_async_task
-third_party_android_crazy_linker home/chrome-bot/chrome_root/src/third_party/android_crazy_linker
-third_party_android_data_chart home/chrome-bot/chrome_root/src/third_party/android_data_chart
-third_party_android_media home/chrome-bot/chrome_root/src/third_party/android_media
-third_party_android_opengl home/chrome-bot/chrome_root/src/third_party/android_opengl
-third_party_android_platform home/chrome-bot/chrome_root/src/third_party/android_platform
-third_party_android_protobuf home/chrome-bot/chrome_root/src/third_party/android_protobuf
-third_party_android_support_test_runner home/chrome-bot/chrome_root/src/third_party/android_support_test_runner
-third_party_android_swipe_refresh home/chrome-bot/chrome_root/src/third_party/android_swipe_refresh
-third_party_angle home/chrome-bot/chrome_root/src/third_party/angle
-third_party_apache-portable-runtime home/chrome-bot/chrome_root/src/third_party/apache-portable-runtime
-third_party_apache_velocity home/chrome-bot/chrome_root/src/third_party/apache_velocity
-third_party_apache-win32 home/chrome-bot/chrome_root/src/third_party/apache-win32
-third_party_apple_apsl home/chrome-bot/chrome_root/src/third_party/apple_apsl
-third_party_apple_sample_code home/chrome-bot/chrome_root/src/third_party/apple_sample_code
-third_party_appurify-python home/chrome-bot/chrome_root/src/third_party/appurify-python
-third_party_ashmem home/chrome-bot/chrome_root/src/third_party/ashmem
-third_party_bidichecker home/chrome-bot/chrome_root/src/third_party/bidichecker
-third_party_bintrees home/chrome-bot/chrome_root/src/third_party/bintrees
-third_party_binutils home/chrome-bot/chrome_root/src/third_party/binutils
-third_party_blanketjs home/chrome-bot/chrome_root/src/third_party/blanketjs
-third_party_blimp_fonts home/chrome-bot/chrome_root/src/third_party/blimp_fonts
-third_party_boringssl home/chrome-bot/chrome_root/src/third_party/boringssl
-third_party_boringssl home/chrome-bot/chrome_root/src/third_party/boringssl/
-third_party_bouncycastle home/chrome-bot/chrome_root/src/third_party/bouncycastle
-third_party_brotli home/chrome-bot/chrome_root/src/third_party/brotli
-third_party_bspatch home/chrome-bot/chrome_root/src/third_party/bspatch
-third_party_cacheinvalidation home/chrome-bot/chrome_root/src/third_party/cacheinvalidation
-third_party_cardboard-java home/chrome-bot/chrome_root/src/third_party/cardboard-java
-third_party_catapult home/chrome-bot/chrome_root/src/third_party/catapult
-third_party_ced home/chrome-bot/chrome_root/src/third_party/ced
-third_party_chaijs home/chrome-bot/chrome_root/src/third_party/chaijs
-third_party_checkstyle home/chrome-bot/chrome_root/src/third_party/checkstyle
-third_party_chromite home/chrome-bot/chrome_root/src/third_party/chromite
-third_party_class-dump home/chrome-bot/chrome_root/src/third_party/class-dump
-third_party_cld_2 home/chrome-bot/chrome_root/src/third_party/cld_2
-third_party_cld_3 home/chrome-bot/chrome_root/src/third_party/cld_3
-third_party_closure_compiler home/chrome-bot/chrome_root/src/third_party/closure_compiler
-third_party_closure_linter home/chrome-bot/chrome_root/src/third_party/closure_linter
-third_party_codesighs home/chrome-bot/chrome_root/src/third_party/codesighs
-third_party_colorama home/chrome-bot/chrome_root/src/third_party/colorama
-third_party_crashpad home/chrome-bot/chrome_root/src/third_party/crashpad
-third_party_cros_system_api home/chrome-bot/chrome_root/src/third_party/cros_system_api
-third_party_custom_tabs_client home/chrome-bot/chrome_root/src/third_party/custom_tabs_client
-third_party_cython home/chrome-bot/chrome_root/src/third_party/cython
-third_party_d3 home/chrome-bot/chrome_root/src/third_party/d3
-third_party_decklink home/chrome-bot/chrome_root/src/third_party/decklink
-third_party_deqp home/chrome-bot/chrome_root/src/third_party/deqp
-third_party_devscripts home/chrome-bot/chrome_root/src/third_party/devscripts
-third_party_dom_distiller_js home/chrome-bot/chrome_root/src/third_party/dom_distiller_js
-third_party_drmemory home/chrome-bot/chrome_root/src/third_party/drmemory
-third_party_elfutils home/chrome-bot/chrome_root/src/third_party/elfutils
-third_party_errorprone home/chrome-bot/chrome_root/src/third_party/errorprone
-third_party_espresso home/chrome-bot/chrome_root/src/third_party/espresso
-third_party_expat home/chrome-bot/chrome_root/src/third_party/expat
-third_party_ffmpeg home/chrome-bot/chrome_root/src/third_party/ffmpeg
-third_party_fips181 home/chrome-bot/chrome_root/src/third_party/fips181
-third_party_flac home/chrome-bot/chrome_root/src/third_party/flac
-third_party_flatbuffers home/chrome-bot/chrome_root/src/third_party/flatbuffers
-third_party_flot home/chrome-bot/chrome_root/src/third_party/flot
-third_party_fontconfig home/chrome-bot/chrome_root/src/third_party/fontconfig
-third_party_freetype2 home/chrome-bot/chrome_root/src/third_party/freetype2
-third_party_freetype-android home/chrome-bot/chrome_root/src/third_party/freetype-android
-third_party_fuzzymatch home/chrome-bot/chrome_root/src/third_party/fuzzymatch
-third_party_gardiner_mod home/chrome-bot/chrome_root/src/third_party/gardiner_mod
-third_party_gif_player home/chrome-bot/chrome_root/src/third_party/gif_player
-third_party_gles2_conform home/chrome-bot/chrome_root/src/third_party/gles2_conform
-third_party_glslang home/chrome-bot/chrome_root/src/third_party/glslang
-third_party_google_appengine_cloudstorage home/chrome-bot/chrome_root/src/third_party/google_appengine_cloudstorage
-third_party_google_input_tools home/chrome-bot/chrome_root/src/third_party/google_input_tools
-third_party_google_toolbox_for_mac home/chrome-bot/chrome_root/src/third_party/google_toolbox_for_mac
-third_party_grpc home/chrome-bot/chrome_root/src/third_party/grpc
-third_party_guava home/chrome-bot/chrome_root/src/third_party/guava
-third_party_haha home/chrome-bot/chrome_root/src/third_party/haha
-third_party_hamcrest home/chrome-bot/chrome_root/src/third_party/hamcrest
-third_party_harfbuzz-ng home/chrome-bot/chrome_root/src/third_party/harfbuzz-ng
-third_party_hunspell home/chrome-bot/chrome_root/src/third_party/hunspell
-third_party_hunspell_dictionaries home/chrome-bot/chrome_root/src/third_party/hunspell_dictionaries
-third_party_hwcplus home/chrome-bot/chrome_root/src/third_party/hwcplus
-third_party_iaccessible2 home/chrome-bot/chrome_root/src/third_party/iaccessible2
-third_party_iccjpeg home/chrome-bot/chrome_root/src/third_party/iccjpeg
-third_party_icu home/chrome-bot/chrome_root/src/third_party/icu
-third_party_icu4j home/chrome-bot/chrome_root/src/third_party/icu4j
-third_party_ijar home/chrome-bot/chrome_root/src/third_party/ijar
-third_party_instrumented_libraries home/chrome-bot/chrome_root/src/third_party/instrumented_libraries
-third_party_intellij home/chrome-bot/chrome_root/src/third_party/intellij
-third_party_isimpledom home/chrome-bot/chrome_root/src/third_party/isimpledom
-third_party_javax_inject home/chrome-bot/chrome_root/src/third_party/javax_inject
-third_party_jinja2 home/chrome-bot/chrome_root/src/third_party/jinja2
-third_party_jmake home/chrome-bot/chrome_root/src/third_party/jmake
-third_party_jsoncpp home/chrome-bot/chrome_root/src/third_party/jsoncpp
-third_party_jsr-305 home/chrome-bot/chrome_root/src/third_party/jsr-305
-third_party_jstemplate home/chrome-bot/chrome_root/src/third_party/jstemplate
-third_party_junit home/chrome-bot/chrome_root/src/third_party/junit
-third_party_kasko home/chrome-bot/chrome_root/src/third_party/kasko
-third_party_khronos home/chrome-bot/chrome_root/src/third_party/khronos
-third_party_khronos_glcts home/chrome-bot/chrome_root/src/third_party/khronos_glcts
-third_party_lcov home/chrome-bot/chrome_root/src/third_party/lcov
-third_party_leakcanary home/chrome-bot/chrome_root/src/third_party/leakcanary
-third_party_leveldatabase home/chrome-bot/chrome_root/src/third_party/leveldatabase
-third_party_libaddressinput home/chrome-bot/chrome_root/src/third_party/libaddressinput
-third_party_libc++-static home/chrome-bot/chrome_root/src/third_party/libc++-static
-third_party_libFuzzer home/chrome-bot/chrome_root/src/third_party/libFuzzer
-third_party_libjingle home/chrome-bot/chrome_root/src/third_party/libjingle
-third_party_libjpeg home/chrome-bot/chrome_root/src/third_party/libjpeg
-third_party_libjpeg_turbo home/chrome-bot/chrome_root/src/third_party/libjpeg_turbo
-third_party_liblouis home/chrome-bot/chrome_root/src/third_party/liblouis
-third_party_libphonenumber home/chrome-bot/chrome_root/src/third_party/libphonenumber
-third_party_libpng home/chrome-bot/chrome_root/src/third_party/libpng
-third_party_libsecret home/chrome-bot/chrome_root/src/third_party/libsecret
-third_party_libsrtp home/chrome-bot/chrome_root/src/third_party/libsrtp
-third_party_libsync home/chrome-bot/chrome_root/src/third_party/libsync
-third_party_libudev home/chrome-bot/chrome_root/src/third_party/libudev
-third_party_libusb home/chrome-bot/chrome_root/src/third_party/libusb
-third_party_libva home/chrome-bot/chrome_root/src/third_party/libva
-third_party_libvpx home/chrome-bot/chrome_root/src/third_party/libvpx
-third_party_libwebm home/chrome-bot/chrome_root/src/third_party/libwebm
-third_party_libwebp home/chrome-bot/chrome_root/src/third_party/libwebp
-third_party_libxml home/chrome-bot/chrome_root/src/third_party/libxml
-third_party_libXNVCtrl home/chrome-bot/chrome_root/src/third_party/libXNVCtrl
-third_party_libxslt home/chrome-bot/chrome_root/src/third_party/libxslt
-third_party_libyuv home/chrome-bot/chrome_root/src/third_party/libyuv
-third_party_llvm-build home/chrome-bot/chrome_root/src/third_party/llvm-build
-third_party_logilab home/chrome-bot/chrome_root/src/third_party/logilab
-third_party_lss home/chrome-bot/chrome_root/src/third_party/lss
-third_party_lzma_sdk home/chrome-bot/chrome_root/src/third_party/lzma_sdk
-third_party_mach_override home/chrome-bot/chrome_root/src/third_party/mach_override
-third_party_markdown home/chrome-bot/chrome_root/src/third_party/markdown
-third_party_markupsafe home/chrome-bot/chrome_root/src/third_party/markupsafe
-third_party_mesa home/chrome-bot/chrome_root/src/third_party/mesa
-third_party_minigbm home/chrome-bot/chrome_root/src/third_party/minigbm
-third_party_mocha home/chrome-bot/chrome_root/src/third_party/mocha
-third_party_mockito home/chrome-bot/chrome_root/src/third_party/mockito
-third_party_modp_b64 home/chrome-bot/chrome_root/src/third_party/modp_b64
-third_party_molokocacao home/chrome-bot/chrome_root/src/third_party/molokocacao
-third_party_motemplate home/chrome-bot/chrome_root/src/third_party/motemplate
-third_party_mozilla home/chrome-bot/chrome_root/src/third_party/mozilla
-third_party_mt19937ar home/chrome-bot/chrome_root/src/third_party/mt19937ar
-third_party_netty4 home/chrome-bot/chrome_root/src/third_party/netty4
-third_party_netty-tcnative home/chrome-bot/chrome_root/src/third_party/netty-tcnative
-third_party_ocmock home/chrome-bot/chrome_root/src/third_party/ocmock
-third_party_openh264 home/chrome-bot/chrome_root/src/third_party/openh264
-third_party_openmax_dl home/chrome-bot/chrome_root/src/third_party/openmax_dl
-third_party_opus home/chrome-bot/chrome_root/src/third_party/opus
-third_party_ots home/chrome-bot/chrome_root/src/third_party/ots
-third_party_ow2_asm home/chrome-bot/chrome_root/src/third_party/ow2_asm
-third_party_pdfium home/chrome-bot/chrome_root/src/third_party/pdfium
-third_party_pexpect home/chrome-bot/chrome_root/src/third_party/pexpect
-third_party_ply home/chrome-bot/chrome_root/src/third_party/ply
-third_party_polymer home/chrome-bot/chrome_root/src/third_party/polymer
-third_party_PRESUBMIT.py home/chrome-bot/chrome_root/src/third_party/PRESUBMIT.py
-third_party_proguard home/chrome-bot/chrome_root/src/third_party/proguard
-third_party_protobuf home/chrome-bot/chrome_root/src/third_party/protobuf
-third_party_pycoverage home/chrome-bot/chrome_root/src/third_party/pycoverage
-third_party_pyelftools home/chrome-bot/chrome_root/src/third_party/pyelftools
-third_party_pyftpdlib home/chrome-bot/chrome_root/src/third_party/pyftpdlib
-third_party_pylint home/chrome-bot/chrome_root/src/third_party/pylint
-third_party_pymock home/chrome-bot/chrome_root/src/third_party/pymock
-third_party_python_gflags home/chrome-bot/chrome_root/src/third_party/python_gflags
-third_party_Python-Markdown home/chrome-bot/chrome_root/src/third_party/Python-Markdown
-third_party_py_trace_event home/chrome-bot/chrome_root/src/third_party/py_trace_event
-third_party_pywebsocket home/chrome-bot/chrome_root/src/third_party/pywebsocket
-third_party_qcms home/chrome-bot/chrome_root/src/third_party/qcms
-third_party_qunit home/chrome-bot/chrome_root/src/third_party/qunit
-third_party_re2 home/chrome-bot/chrome_root/src/third_party/re2
-third_party_requests home/chrome-bot/chrome_root/src/third_party/requests
-third_party_robolectric home/chrome-bot/chrome_root/src/third_party/robolectric
-third_party_scons-2.0.1 home/chrome-bot/chrome_root/src/third_party/scons-2.0.1
-third_party_sfntly home/chrome-bot/chrome_root/src/third_party/sfntly
-third_party_shaderc home/chrome-bot/chrome_root/src/third_party/shaderc
-third_party_simplejson home/chrome-bot/chrome_root/src/third_party/simplejson
-third_party_sinonjs home/chrome-bot/chrome_root/src/third_party/sinonjs
-third_party_skia home/chrome-bot/chrome_root/src/third_party/skia
-third_party_smhasher home/chrome-bot/chrome_root/src/third_party/smhasher
-third_party_snappy home/chrome-bot/chrome_root/src/third_party/snappy
-third_party_speech-dispatcher home/chrome-bot/chrome_root/src/third_party/speech-dispatcher
-third_party_SPIRV-Tools home/chrome-bot/chrome_root/src/third_party/SPIRV-Tools
-third_party_sqlite home/chrome-bot/chrome_root/src/third_party/sqlite
-third_party_sqlite4java home/chrome-bot/chrome_root/src/third_party/sqlite4java
-third_party_sudden_motion_sensor home/chrome-bot/chrome_root/src/third_party/sudden_motion_sensor
-third_party_swiftshader home/chrome-bot/chrome_root/src/third_party/swiftshader
-third_party_talloc home/chrome-bot/chrome_root/src/third_party/talloc
-third_party_tcmalloc home/chrome-bot/chrome_root/src/third_party/tcmalloc
-third_party_tlslite home/chrome-bot/chrome_root/src/third_party/tlslite
-third_party_typ home/chrome-bot/chrome_root/src/third_party/typ
-third_party_ub-uiautomator home/chrome-bot/chrome_root/src/third_party/ub-uiautomator
-third_party_usb_ids home/chrome-bot/chrome_root/src/third_party/usb_ids
-third_party_usrsctp home/chrome-bot/chrome_root/src/third_party/usrsctp
-third_party_v4l2capture home/chrome-bot/chrome_root/src/third_party/v4l2capture
-third_party_v4l-utils home/chrome-bot/chrome_root/src/third_party/v4l-utils
-third_party_vulkan home/chrome-bot/chrome_root/src/third_party/vulkan
-third_party_wayland home/chrome-bot/chrome_root/src/third_party/wayland
-third_party_wayland-protocols home/chrome-bot/chrome_root/src/third_party/wayland-protocols
-third_party_wds home/chrome-bot/chrome_root/src/third_party/wds
-third_party_web-animations-js home/chrome-bot/chrome_root/src/third_party/web-animations-js
-third_party_webdriver home/chrome-bot/chrome_root/src/third_party/webdriver
-third_party_webgl home/chrome-bot/chrome_root/src/third_party/webgl
-third_party_WebKit home/chrome-bot/chrome_root/src/third_party/WebKit
-third_party_webpagereplay home/chrome-bot/chrome_root/src/third_party/webpagereplay
-third_party_webrtc home/chrome-bot/chrome_root/src/third_party/webrtc
-third_party_webrtc_overrides home/chrome-bot/chrome_root/src/third_party/webrtc_overrides
-third_party_webtreemap home/chrome-bot/chrome_root/src/third_party/webtreemap
-third_party_widevine home/chrome-bot/chrome_root/src/third_party/widevine
-third_party_woff2 home/chrome-bot/chrome_root/src/third_party/woff2
-third_party_wtl home/chrome-bot/chrome_root/src/third_party/wtl
-third_party_x86inc home/chrome-bot/chrome_root/src/third_party/x86inc
-third_party_xdg-utils home/chrome-bot/chrome_root/src/third_party/xdg-utils
-third_party_yasm home/chrome-bot/chrome_root/src/third_party/yasm
-third_party_zlib home/chrome-bot/chrome_root/src/third_party/zlib
-android_webview home/chrome-bot/chrome_root/src/android_webview
-apps home/chrome-bot/chrome_root/src/apps
-ash home/chrome-bot/chrome_root/src/ash
-base home/chrome-bot/chrome_root/src/base
-blimp home/chrome-bot/chrome_root/src/blimp
-blink home/chrome-bot/chrome_root/src/blink
-breakpad home/chrome-bot/chrome_root/src/breakpad
-build home/chrome-bot/chrome_root/src/build
-build_overrides home/chrome-bot/chrome_root/src/build_overrides
-buildtools home/chrome-bot/chrome_root/src/buildtools
-cc home/chrome-bot/chrome_root/src/cc/
-chrome home/chrome-bot/chrome_root/src/chrome/
-chromecast home/chrome-bot/chrome_root/src/chromecast/
-chrome_elf home/chrome-bot/chrome_root/src/chrome_elf
-chromeos home/chrome-bot/chrome_root/src/chromeos
-components home/chrome-bot/chrome_root/src/components
-content home/chrome-bot/chrome_root/src/content
-courgette home/chrome-bot/chrome_root/src/courgette
-crypto home/chrome-bot/chrome_root/src/crypto
-data home/chrome-bot/chrome_root/src/data
-dbus home/chrome-bot/chrome_root/src/dbus
-DEPS home/chrome-bot/chrome_root/src/DEPS
-device home/chrome-bot/chrome_root/src/device
-docs home/chrome-bot/chrome_root/src/docs
-extensions home/chrome-bot/chrome_root/src/extensions
-gin home/chrome-bot/chrome_root/src/gin
-google_apis home/chrome-bot/chrome_root/src/google_apis
-google_update home/chrome-bot/chrome_root/src/google_update
-gpu home/chrome-bot/chrome_root/src/gpu
-headless home/chrome-bot/chrome_root/src/headless
-infra home/chrome-bot/chrome_root/src/infra
-internal_gyp home/chrome-bot/chrome_root/src/internal_gyp
-ios home/chrome-bot/chrome_root/src/ios
-ipc home/chrome-bot/chrome_root/src/ipc
-jingle home/chrome-bot/chrome_root/src/jingle
-mash home/chrome-bot/chrome_root/src/mash
-media home/chrome-bot/chrome_root/src/media
-mojo home/chrome-bot/chrome_root/src/mojo
-native_client home/chrome-bot/chrome_root/src/native_client
-native_client_sdk home/chrome-bot/chrome_root/src/native_client_sdk
-net home/chrome-bot/chrome_root/src/net
-out home/chrome-bot/chrome_root/src/out
-out_BOARD home/chrome-bot/chrome_root/src/out_BOARD
-pdf home/chrome-bot/chrome_root/src/pdf
-ppapi home/chrome-bot/chrome_root/src/ppapi
-printing home/chrome-bot/chrome_root/src/printing
-remoting home/chrome-bot/chrome_root/src/remoting
-rlz home/chrome-bot/chrome_root/src/rlz
-sandbox home/chrome-bot/chrome_root/src/sandbox
-sdch home/chrome-bot/chrome_root/src/sdch
-services home/chrome-bot/chrome_root/src/services
-skia home/chrome-bot/chrome_root/src/skia
-sql home/chrome-bot/chrome_root/src/sql
-storage home/chrome-bot/chrome_root/src/storage
-styleguide home/chrome-bot/chrome_root/src/styleguide
-sync home/chrome-bot/chrome_root/src/sync
-testing home/chrome-bot/chrome_root/src/testing
-tools home/chrome-bot/chrome_root/src/tools
-ui home/chrome-bot/chrome_root/src/ui
-url home/chrome-bot/chrome_root/src/url
-v8 home/chrome-bot/chrome_root/src/v8
-webkit home/chrome-bot/chrome_root/src/webkit
-third_party_kernel /mnt/host/source/src/third_party/kernel
-build_sys-kernel /build/BOARD/var/cache/portage/sys-kernel
-build_var_cache_portage /build/BOARD/var/cache/portage
-build_pepper_flash /build/BOARD/tmp/portage/chromeos-base/pepper-flash
-build_media_sound /build/BOARD/tmp/portage/media-sound/
-build_media_libs /build/BOARD/tmp/portage/media-libs/
-build_net_dns /build/BOARD/tmp/portage/net-dns
-build_sys_apps /build/BOARD/tmp/portage/sys-apps
-build_app_shells /build/BOARD/tmp/portage/app-shells
-build_x11_libs /build/BOARD/tmp/portage/x11-libs
-build_dev_libs /build/BOARD/tmp/portage/dev-libs
-build_dev_db /build/BOARD/tmp/portage/dev-db
-build_sys_libs /build/BOARD/tmp/portage/sys-libs
-build_app_arch /build/BOARD/tmp/portage/app-arch
-build_app_crypt /build/BOARD/tmp/portage/app-crypt
-build_rsyslog /build/BOARD/tmp/portage/app-admin/rsyslog
-build_net_misc /build/BOARD/tmp/portage/net-misc
-build_sys_fs /build/BOARD/tmp/portage/sys-fs
-build_update_engine /build/BOARD/tmp/portage/chromeos-base/update_engine
-build_libchrome /build/BOARD/tmp/portage/chromeos-base/libchrome
-build_gestures /build/BOARD/tmp/portage/chromeos-base/gestures
-build_libbrillo /build/BOARD/tmp/portage/chromeos-base/libbrillo
-build_shill /build/BOARD/tmp/portage/chromeos-base/shill
-build_libevdev /build/BOARD/tmp/portage/chromeos-base/libevdev
-build_chromeos_base /build/BOARD/tmp/portage/chromeos-base
-build_net_wireless /build/BOARD/tmp/portage/net-wireless
-build_sys_power /build/BOARD/tmp/portage/sys-power/
-build_tmp_portage /build/BOARD/tmp/portage
-usr_include /build/BOARD/usr/include
-blink_bindings /var/cache/chromeos-chrome/chrome-src-internal/src/out_BOARD/Release/gen/blink/bindings/
-var_cache /var/cache
-gcc_stl /usr/lib/gcc/x86_64-cros-linux-gnu/
-gcc_stl /mnt/host/source/src/third_party/gcc/
-libc /var/tmp/portage/cross-x86_64-cros-linux-gnu/
-libc sysdeps/
-libc nptl/
-others /
-others .
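Each line above pairs a group name with the file-path prefix that defines the group. A minimal sketch of parsing this two-column format (illustrative only; the deleted tools use utils.ParseFunctionGroups for this), using two entries taken from the list above:

def parse_function_groups(lines):
  # Returns (group_name, path) tuples from 'name path' lines.
  groups = []
  for line in lines:
    parts = line.split()
    if len(parts) == 2:
      groups.append((parts[0], parts[1]))
  return groups

print(parse_function_groups([
    'base home/chrome-bot/chrome_root/src/base',
    'libc sysdeps/',
]))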
diff --git a/user_activity_benchmarks/process_hot_functions.py b/user_activity_benchmarks/process_hot_functions.py
deleted file mode 100755
index 2fbf3f9..0000000
--- a/user_activity_benchmarks/process_hot_functions.py
+++ /dev/null
@@ -1,482 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Processes the functions from the pprof(go/pprof) files and CWP(go/cwp) data.
-
-The pprof --top and pprof --tree outputs should be extracted from the benchmark
-profiles. The outputs contain the hot functions and the call chains.
-
-For each pair of pprof --top and --tree output files, the tool will create a
-file that contains the hot functions present also in the extracted CWP data.
-The common functions are organized in groups that represent a Chrome OS
-component. A function belongs to a group that is defined by a given file path
-if it is declared in a file that shares that path.
-
-A set of metrics is computed for each function, benchmark and Chrome OS group
-covered by a benchmark.
-
-Afterwards, this script extracts the functions that are present in the CWP
-data but not in the benchmark profiles. The extra functions are also grouped
-into Chrome OS components.
-"""
-
-from collections import defaultdict
-
-import argparse
-import os
-import shutil
-import sys
-
-import benchmark_metrics
-import utils
-
-
-class HotFunctionsProcessor(object):
-  """Does the pprof and CWP output processing.
-
-  Extracts the common, extra functions from the pprof files, groups them in
-  Chrome OS components. Computes the metrics for the common functions,
-  benchmark and Chrome OS groups covered by a benchmark.
-  """
-
-  def __init__(self, pprof_top_path, pprof_tree_path, cwp_inclusive_count_file,
-               cwp_pairwise_inclusive_count_file, cwp_function_groups_file,
-               common_functions_path, common_functions_groups_path,
-               benchmark_set_metrics_file, extra_cwp_functions_file,
-               extra_cwp_functions_groups_file,
-               extra_cwp_functions_groups_path):
-    """Initializes the HotFunctionsProcessor.
-
-    Args:
-      pprof_top_path: The directory containing the files with the pprof --top
-        output.
-      pprof_tree_path: The directory containing the files with the pprof --tree
-        output.
-      cwp_inclusive_count_file: The CSV file containing the CWP functions with
-        the inclusive count values.
-      cwp_pairwise_inclusive_count_file: The CSV file containing the CWP pairs
-        of parent and child functions with their inclusive count values.
-      cwp_function_groups_file: The file that contains the CWP function groups.
-      common_functions_path: The directory containing the CSV output files
-        with the common functions of the benchmark profiles and CWP data.
-      common_functions_groups_path: The directory containing the CSV output
-        files with the CWP groups and their metrics that match the common
-        functions of the benchmark profiles and CWP.
-      benchmark_set_metrics_file: The CSV output file containing the metrics for
-        each benchmark.
-      extra_cwp_functions_file: The CSV output file containing the functions
-        that are in the CWP data, but are not in any of the benchmark profiles.
-      extra_cwp_functions_groups_file: The CSV output file containing the groups
-        that match the extra CWP functions and their statistics.
-      extra_cwp_functions_groups_path: The directory containing the CSV output
-        files with the extra CWP functions that match a particular group.
-    """
-    self._pprof_top_path = pprof_top_path
-    self._pprof_tree_path = pprof_tree_path
-    self._cwp_inclusive_count_file = cwp_inclusive_count_file
-    self._cwp_pairwise_inclusive_count_file = cwp_pairwise_inclusive_count_file
-    self._cwp_function_groups_file = cwp_function_groups_file
-    self._common_functions_path = common_functions_path
-    self._common_functions_groups_path = common_functions_groups_path
-    self._benchmark_set_metrics_file = benchmark_set_metrics_file
-    self._extra_cwp_functions_file = extra_cwp_functions_file
-    self._extra_cwp_functions_groups_file = extra_cwp_functions_groups_file
-    self._extra_cwp_functions_groups_path = extra_cwp_functions_groups_path
-
-  def ProcessHotFunctions(self):
-    """Does the processing of the hot functions."""
-    with open(self._cwp_function_groups_file) as input_file:
-      cwp_function_groups = utils.ParseFunctionGroups(input_file.readlines())
-    cwp_statistics = \
-      self.ExtractCommonFunctions(self._pprof_top_path,
-                                  self._pprof_tree_path,
-                                  self._cwp_inclusive_count_file,
-                                  self._cwp_pairwise_inclusive_count_file,
-                                  cwp_function_groups,
-                                  self._common_functions_path,
-                                  self._common_functions_groups_path,
-                                  self._benchmark_set_metrics_file)
-    self.ExtractExtraFunctions(cwp_statistics, self._extra_cwp_functions_file)
-    self.GroupExtraFunctions(cwp_statistics, cwp_function_groups,
-                             self._extra_cwp_functions_groups_path,
-                             self._extra_cwp_functions_groups_file)
-
-  def ExtractCommonFunctions(self, pprof_top_path, pprof_tree_path,
-                             cwp_inclusive_count_file,
-                             cwp_pairwise_inclusive_count_file,
-                             cwp_function_groups, common_functions_path,
-                             common_functions_groups_path,
-                             benchmark_set_metrics_file):
-    """Extracts the common functions of the benchmark profiles and the CWP data.
-
-    For each pair of pprof --top and --tree output files, it creates a separate
-    file with the same name, containing the common function specifications and
-    metrics, which is placed in the common_functions_path directory.
-
-    The resulting file is in CSV format, containing the following fields:
-    function name, file name, object, inclusive count, inclusive_count_fraction,
-    flat, flat%, sum%, cum, cum%, distance and score.
-
-    For each pair of pprof files, an additional file is created with the
-    Chrome OS groups that match the common functions.
-
-    The file is in CSV format containing the fields: group name, group path,
-    the number of functions that match the group, the average and cumulative
-    distance, the average and cumulative score.
-    The file has the same name as the pprof file and is placed in the
-    common_functions_groups_path directory.
-
-    For all the analyzed benchmarks, the method creates a CSV output file
-    containing the metrics for each benchmark. The CSV fields include the
-    benchmark name, the number of common functions, the average and
-    cumulative distance and score.
-
-    It builds a dict of the CWP statistics by calling the
-    utils.ParseCWPInclusiveCountFile method and if a function is common, it is
-    marked as a COMMON_FUNCTION.
-
-    Args:
-      pprof_top_path: The name of the directory with the files with the
-        pprof --top output.
-      pprof_tree_path: The name of the directory with the files with the
-        pprof --tree output.
-      cwp_inclusive_count_file: The CSV file with the CWP inclusive count
-        values.
-      cwp_pairwise_inclusive_count_file: The CSV file with the pairwise
-        inclusive count values.
-      cwp_function_groups: A list of tuples containing the name of the group
-        and the corresponding file path.
-      common_functions_path: The path containing the output files with the
-        common functions and their metrics.
-      common_functions_groups_path: The path containing the output files with
-        the Chrome OS groups that match the common functions and their metrics.
-      benchmark_set_metrics_file: The CSV output file containing the metrics for
-        all the analyzed benchmarks.
-
-    Returns:
-      A dict containing the CWP statistics with the common functions marked as
-      COMMON_FUNCTION.
-    """
-    cwp_inclusive_count_statistics = \
-        utils.ParseCWPInclusiveCountFile(cwp_inclusive_count_file)
-    cwp_pairwise_inclusive_count_statistics = \
-        utils.ParseCWPPairwiseInclusiveCountFile(
-            cwp_pairwise_inclusive_count_file)
-    cwp_inclusive_count_statistics_cumulative = \
-        utils.ComputeCWPCummulativeInclusiveStatistics(
-            cwp_inclusive_count_statistics)
-    cwp_pairwise_inclusive_count_fractions = \
-        utils.ComputeCWPChildFunctionsFractions(
-            cwp_inclusive_count_statistics_cumulative,
-            cwp_pairwise_inclusive_count_statistics)
-    benchmark_set_metrics = {}
-    pprof_files = os.listdir(pprof_top_path)
-
-    for pprof_file in pprof_files:
-      pprof_top_statistics = \
-          utils.ParsePprofTopOutput(os.path.join(pprof_top_path, pprof_file))
-      pprof_tree_statistics = \
-          utils.ParsePprofTreeOutput(os.path.join(pprof_tree_path, pprof_file))
-      common_functions_lines = []
-      benchmark_function_metrics = {}
-
-      for function_key, function_statistic in pprof_top_statistics.iteritems():
-        if function_key not in cwp_inclusive_count_statistics:
-          continue
-
-        cwp_dso_name, cwp_inclusive_count, cwp_inclusive_count_fraction, _ = \
-            cwp_inclusive_count_statistics[function_key]
-        cwp_inclusive_count_statistics[function_key] = \
-            (cwp_dso_name, cwp_inclusive_count, cwp_inclusive_count_fraction,
-             utils.COMMON_FUNCTION)
-
-        function_name, _ = function_key.split(',')
-        distance = benchmark_metrics.ComputeDistanceForFunction(
-            pprof_tree_statistics[function_key],
-            cwp_pairwise_inclusive_count_fractions.get(function_name, {}))
-        benchmark_cum_p = float(function_statistic[4])
-        score = benchmark_metrics.ComputeScoreForFunction(
-            distance, cwp_inclusive_count_fraction, benchmark_cum_p)
-        benchmark_function_metrics[function_key] = (distance, score)
-
-        common_functions_lines.append(','.join([function_key, cwp_dso_name, str(
-            cwp_inclusive_count), str(cwp_inclusive_count_fraction), ','.join(
-                function_statistic), str(distance), str(score)]))
-      benchmark_function_groups_statistics = \
-          benchmark_metrics.ComputeMetricsForComponents(
-              cwp_function_groups, benchmark_function_metrics)
-      benchmark_set_metrics[pprof_file] = \
-          benchmark_metrics.ComputeMetricsForBenchmark(
-              benchmark_function_metrics)
-
-      with open(os.path.join(common_functions_path, pprof_file), 'w') \
-          as output_file:
-        common_functions_lines.sort(
-            key=lambda x: float(x.split(',')[11]), reverse=True)
-        common_functions_lines.insert(0, 'function,file,dso,inclusive_count,'
-                                      'inclusive_count_fraction,flat,flat%,'
-                                      'sum%,cum,cum%,distance,score')
-        output_file.write('\n'.join(common_functions_lines))
-
-      with open(os.path.join(common_functions_groups_path, pprof_file), 'w') \
-          as output_file:
-        common_functions_groups_lines = \
-            [','.join([group_name, ','.join(
-                [str(statistic) for statistic in group_statistic])])
-             for group_name, group_statistic in
-             benchmark_function_groups_statistics.iteritems()]
-        common_functions_groups_lines.sort(
-            key=lambda x: float(x.split(',')[5]), reverse=True)
-        common_functions_groups_lines.insert(
-            0, 'group_name,file_path,number_of_functions,distance_cum,'
-            'distance_avg,score_cum,score_avg')
-        output_file.write('\n'.join(common_functions_groups_lines))
-
-    with open(benchmark_set_metrics_file, 'w') as output_file:
-      benchmark_set_metrics_lines = []
-
-      for benchmark_name, metrics in benchmark_set_metrics.iteritems():
-        benchmark_set_metrics_lines.append(','.join([benchmark_name, ','.join(
-            [str(metric) for metric in metrics])]))
-      benchmark_set_metrics_lines.sort(
-          key=lambda x: float(x.split(',')[4]), reverse=True)
-      benchmark_set_metrics_lines.insert(
-          0, 'benchmark_name,number_of_functions,distance_cum,distance_avg,'
-          'score_cum,score_avg')
-      output_file.write('\n'.join(benchmark_set_metrics_lines))
-
-    return cwp_inclusive_count_statistics
-
-  def GroupExtraFunctions(self, cwp_statistics, cwp_function_groups,
-                          extra_cwp_functions_groups_path,
-                          extra_cwp_functions_groups_file):
-    """Groups the extra functions.
-
-    Writes the data of the functions that belong to each group in a separate
-    file, sorted by their inclusive count value, in descending order. The file
-    name is the same as the group name.
-
-    The file is in CSV format, containing the fields: function name, file name,
-    object name, inclusive count, inclusive count fraction.
-
-    It also creates a CSV file containing the name of each group, its
-    common path, the total inclusive count and inclusive count fraction values
-    of all the functions declared in files that share the common path, sorted
-    in descending order by the inclusive count value.
-
-    Args:
-      cwp_statistics: A dict containing the CWP statistics.
-      cwp_function_groups: A list of tuples with the groups names and the path
-        describing the groups.
-      extra_cwp_functions_groups_path: The name of the directory containing
-        the CSV output files with the extra CWP functions that match a
-        particular group.
-      extra_cwp_functions_groups_file: The CSV output file containing the groups
-        that match the extra functions and their statistics.
-    """
-    cwp_function_groups_statistics = defaultdict(lambda: ([], '', 0, 0.0))
-    for function, statistics in cwp_statistics.iteritems():
-      if statistics[3] == utils.COMMON_FUNCTION:
-        continue
-
-      file_name = function.split(',')[1]
-      group_inclusive_count = int(statistics[1])
-      group_inclusive_count_fraction = float(statistics[2])
-
-      for group in cwp_function_groups:
-        group_common_path = group[1]
-
-        if group_common_path not in file_name:
-          continue
-
-        group_name = group[0]
-        group_statistics = cwp_function_groups_statistics[group_name]
-        group_lines = group_statistics[0]
-        group_inclusive_count += group_statistics[2]
-        group_inclusive_count_fraction += group_statistics[3]
-
-        group_lines.append(','.join([function, statistics[0],
-                                     str(statistics[1]), str(statistics[2])]))
-        cwp_function_groups_statistics[group_name] = \
-            (group_lines, group_common_path, group_inclusive_count,
-             group_inclusive_count_fraction)
-        break
-
-    extra_cwp_functions_groups_lines = []
-    for group_name, group_statistics \
-        in cwp_function_groups_statistics.iteritems():
-      group_output_lines = group_statistics[0]
-      group_output_lines.sort(key=lambda x: int(x.split(',')[3]), reverse=True)
-      group_output_lines.insert(
-          0, 'function,file,dso,inclusive_count,inclusive_count_fraction')
-      with open(os.path.join(extra_cwp_functions_groups_path, group_name),
-                'w') as output_file:
-        output_file.write('\n'.join(group_output_lines))
-      extra_cwp_functions_groups_lines.append(','.join(
-          [group_name, group_statistics[1], str(group_statistics[2]), str(
-              group_statistics[3])]))
-
-    extra_cwp_functions_groups_lines.sort(
-        key=lambda x: int(x.split(',')[2]), reverse=True)
-    extra_cwp_functions_groups_lines.insert(
-        0, 'group,shared_path,inclusive_count,inclusive_count_fraction')
-    with open(extra_cwp_functions_groups_file, 'w') as output_file:
-      output_file.write('\n'.join(extra_cwp_functions_groups_lines))
-
-  def ExtractExtraFunctions(self, cwp_statistics, extra_cwp_functions_file):
-    """Gets the functions that are in the CWP data, but not in the pprof output.
-
-    Writes the functions and their statistics in the extra_cwp_functions_file
-    file. The output is sorted based on the inclusive_count value. The file is
-    in CSV format, containing the fields: function name, file name, object name,
-    inclusive count and inclusive count fraction.
-
-    Args:
-      cwp_statistics: A dict containing the CWP statistics indexed by the
-        function and the file name, comma separated.
-      extra_cwp_functions_file: The file where the CWP functions and
-        statistics that are marked as EXTRA_FUNCTION should be stored.
-    """
-    output_lines = []
-
-    for function, statistics in cwp_statistics.iteritems():
-      if statistics[3] == utils.EXTRA_FUNCTION:
-        output_lines.append(','.join([function, statistics[0],
-                                      str(statistics[1]), str(statistics[2])]))
-
-    with open(extra_cwp_functions_file, 'w') as output_file:
-      output_lines.sort(key=lambda x: int(x.split(',')[3]), reverse=True)
-      output_lines.insert(0, 'function,file,dso,inclusive_count,'
-                          'inclusive_count_fraction')
-      output_file.write('\n'.join(output_lines))
-
-
-def ParseArguments(arguments):
-  parser = argparse.ArgumentParser()
-
-  parser.add_argument(
-      '--pprof_top_path',
-      required=True,
-      help='The directory containing the files with the pprof --top output of '
-      'the benchmark profiles (the hot functions). The names of the files '
-      'should match the names of the pprof --tree output files.')
-  parser.add_argument(
-      '--pprof_tree_path',
-      required=True,
-      help='The directory containing the files with the pprof --tree output '
-      'of the benchmark profiles (the call chains). The names of the files '
-      'should match the names of the pprof --top output files.')
-  parser.add_argument(
-      '--cwp_inclusive_count_file',
-      required=True,
-      help='The CSV file containing the CWP hot functions with their '
-      'inclusive_count values. The CSV fields include the name of the '
-      'function, the file and the object with the definition, the inclusive '
-      'count value and the inclusive count fraction out of the total amount of '
-      'inclusive count values.')
-  parser.add_argument(
-      '--cwp_pairwise_inclusive_count_file',
-      required=True,
-      help='The CSV file containing the CWP pairs of parent and child '
-      'functions with their inclusive count values. The CSV fields include the '
-      'name of the parent and child functions concatenated by ;;, the file '
-      'and the object with the definition of the child function, and the '
-      'inclusive count value.')
-  parser.add_argument(
-      '--cwp_function_groups_file',
-      required=True,
-      help='The file that contains the CWP function groups. A line consists '
-      'of the group name and a file path describing the group. A group must '
-      'represent a Chrome OS component.')
-  parser.add_argument(
-      '--common_functions_path',
-      required=True,
-      help='The directory containing the CSV output files with the common '
-      'functions of the benchmark profiles and CWP data. A file will contain '
-      'all the hot functions from a pprof top output file that are also '
-      'included in the file containing the cwp inclusive count values. The CSV '
-      'fields are: the function name, the file and the object where the '
-      'function is declared, the CWP inclusive count and inclusive count '
-      'fraction values, the cumulative and average distance, the cumulative '
-      'and average score. The files with the common functions will have the '
-      'same names as the corresponding pprof output files.')
-  parser.add_argument(
-      '--common_functions_groups_path',
-      required=True,
-      help='The directory containing the CSV output files with the Chrome OS '
-      'groups and their metrics that match the common functions of the '
-      'benchmark profiles and CWP. The files with the groups will have the '
-      'same names as the corresponding pprof output files. The CSV fields '
-      'include the group name, group path, the number of functions that match '
-      'the group, the average and cumulative distance, the average and '
-      'cumulative score.')
-  parser.add_argument(
-      '--benchmark_set_metrics_file',
-      required=True,
-      help='The CSV output file containing the metrics for each benchmark. The '
-      'CSV fields include the benchmark name, the number of common functions, '
-      'the average and cumulative distance and score.')
-  parser.add_argument(
-      '--extra_cwp_functions_file',
-      required=True,
-      help='The CSV output file containing the functions that are in the CWP '
-      'data, but are not in any of the benchmark profiles. The CSV fields '
-      'include the name of the function, the file name and the object with the '
-      'definition, and the CWP inclusive count and inclusive count fraction '
-      'values. The entries are sorted in descending order based on the '
-      'inclusive count value.')
-  parser.add_argument(
-      '--extra_cwp_functions_groups_file',
-      required=True,
-      help='The CSV output file containing the groups that match the extra CWP '
-      'functions and their statistics. The CSV fields include the group name, '
-      'the file path, the total inclusive count and inclusive count fraction '
-      'values of the functions matching a particular group.')
-  parser.add_argument(
-      '--extra_cwp_functions_groups_path',
-      required=True,
-      help='The directory containing the CSV output files with the extra CWP '
-      'functions that match a particular group. The name of the file is the '
-      'same as the group name. The CSV fields include the name of the '
-      'function, the file name and the object with the definition, and the CWP '
-      'inclusive count and inclusive count fraction values. The entries are '
-      'sorted in descending order based on the inclusive count value.')
-
-  options = parser.parse_args(arguments)
-
-  return options
-
-
-def Main(argv):
-  options = ParseArguments(argv)
-
-  if os.path.exists(options.common_functions_path):
-    shutil.rmtree(options.common_functions_path)
-
-  os.makedirs(options.common_functions_path)
-
-  if os.path.exists(options.common_functions_groups_path):
-    shutil.rmtree(options.common_functions_groups_path)
-
-  os.makedirs(options.common_functions_groups_path)
-
-  if os.path.exists(options.extra_cwp_functions_groups_path):
-    shutil.rmtree(options.extra_cwp_functions_groups_path)
-
-  os.makedirs(options.extra_cwp_functions_groups_path)
-
-  hot_functions_processor = HotFunctionsProcessor(
-      options.pprof_top_path, options.pprof_tree_path,
-      options.cwp_inclusive_count_file,
-      options.cwp_pairwise_inclusive_count_file,
-      options.cwp_function_groups_file, options.common_functions_path,
-      options.common_functions_groups_path, options.benchmark_set_metrics_file,
-      options.extra_cwp_functions_file, options.extra_cwp_functions_groups_file,
-      options.extra_cwp_functions_groups_path)
-
-  hot_functions_processor.ProcessHotFunctions()
-
-
-if __name__ == '__main__':
-  Main(sys.argv[1:])
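To make the grouping rule used by GroupExtraFunctions above concrete: a function is assigned to the first group whose path is a substring of the file name in which the function is declared. A minimal sketch (find_group is a hypothetical name; the group entries are taken from cwp_hot_functions_groups.txt and the file name is made up):

def find_group(file_name, cwp_function_groups):
  # Returns the first (group_name, group_path) whose path occurs in file_name.
  for group_name, group_path in cwp_function_groups:
    if group_path in file_name:
      return group_name, group_path
  return None

groups = [('third_party_icu', 'home/chrome-bot/chrome_root/src/third_party/icu'),
          ('base', 'home/chrome-bot/chrome_root/src/base')]
print(find_group('home/chrome-bot/chrome_root/src/base/logging.cc', groups))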
diff --git a/user_activity_benchmarks/process_hot_functions_unittest.py b/user_activity_benchmarks/process_hot_functions_unittest.py
deleted file mode 100755
index 0ad248b..0000000
--- a/user_activity_benchmarks/process_hot_functions_unittest.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the process_hot_functions module."""
-
-from process_hot_functions import HotFunctionsProcessor, ParseArguments
-
-import mock
-import os
-import shutil
-import tempfile
-import unittest
-
-
-class ParseArgumentsTest(unittest.TestCase):
-  """Test class for command line argument parsing."""
-
-  def __init__(self, *args, **kwargs):
-    super(ParseArgumentsTest, self).__init__(*args, **kwargs)
-
-  def testParseArguments(self):
-    arguments = \
-      ['-p', 'dummy_pprof', '-c', 'dummy_common', '-e', 'dummy_extra', '-w',
-       'dummy_cwp']
-    options = ParseArguments(arguments)
-
-    self.assertEqual(options.pprof_path, 'dummy_pprof')
-    self.assertEqual(options.cwp_hot_functions_file, 'dummy_cwp')
-    self.assertEqual(options.common_functions_path, 'dummy_common')
-    self.assertEqual(options.extra_cwp_functions_file, 'dummy_extra')
-
-  @mock.patch('sys.exit')
-  def testDeathParseArguments(self, sys_exit_method):
-    self.assertFalse(sys_exit_method.called)
-    ParseArguments([])
-    self.assertTrue(sys_exit_method.called)
-    self.assertNotEqual(sys_exit_method.return_value, 0)
-
-
-class HotFunctionsProcessorTest(unittest.TestCase):
-  """Test class for HotFunctionsProcessor class."""
-
-  def __init__(self, *args, **kwargs):
-    super(HotFunctionsProcessorTest, self).__init__(*args, **kwargs)
-    self._pprof_path = 'testdata/input/pprof'
-    self._cwp_functions_file = 'testdata/input/cwp_functions_file.csv'
-    self._cwp_functions_file_parsing = \
-      'testdata/input/parse_cwp_statistics.csv'
-    self._common_functions_path = ''
-    self._expected_common_functions_path = 'testdata/expected/pprof_common'
-    self._extra_cwp_functions_file = ''
-    self._cwp_function_groups_file = 'testdata/input/cwp_function_groups'
-    self._cwp_function_groups_statistics_file = 'dummy'
-    self._cwp_function_groups_file_prefix = 'dummy'
-
-  def _CreateHotFunctionsProcessor(self,
-                                   extra_cwp_functions_file,
-                                   cwp_function_groups_file=None,
-                                   cwp_function_groups_statistics_file=None,
-                                   cwp_function_groups_file_prefix=None):
-    return HotFunctionsProcessor(self._pprof_path, self._cwp_functions_file,
-                                 self._common_functions_path,
-                                 extra_cwp_functions_file,
-                                 cwp_function_groups_file,
-                                 cwp_function_groups_statistics_file,
-                                 cwp_function_groups_file_prefix)
-
-  def checkFileContents(self, file_name, expected_content_lines):
-    with open(file_name, 'r') as input_file:
-      result_content_lines = input_file.readlines()
-    self.assertListEqual(expected_content_lines, result_content_lines)
-
-  @mock.patch.object(HotFunctionsProcessor, 'ExtractCommonFunctions')
-  @mock.patch.object(HotFunctionsProcessor, 'ExtractExtraFunctions')
-  @mock.patch.object(HotFunctionsProcessor, 'GroupExtraFunctions')
-  def testProcessHotFunctionsNoGroupping(self, group_functions_method,
-                                         extra_functions_method,
-                                         common_functions_method):
-    hot_functions_processor = self._CreateHotFunctionsProcessor(
-        self._extra_cwp_functions_file)
-
-    hot_functions_processor.ProcessHotFunctions()
-
-    self.assertTrue(common_functions_method.called)
-    self.assertTrue(extra_functions_method.called)
-    self.assertEqual(common_functions_method.call_count, 1)
-    self.assertEqual(extra_functions_method.call_count, 1)
-    self.assertFalse(group_functions_method.called)
-
-  @mock.patch.object(HotFunctionsProcessor, 'ExtractCommonFunctions')
-  @mock.patch.object(HotFunctionsProcessor, 'ExtractExtraFunctions')
-  @mock.patch.object(HotFunctionsProcessor, 'GroupExtraFunctions')
-  def testProcessHotFunctionsGroupping(self, group_functions_method,
-                                       extra_functions_method,
-                                       common_functions_method):
-    hot_functions_processor = self._CreateHotFunctionsProcessor(
-        self._extra_cwp_functions_file, self._cwp_function_groups_file,
-        self._cwp_function_groups_statistics_file,
-        self._cwp_function_groups_file_prefix)
-
-    hot_functions_processor.ProcessHotFunctions()
-
-    self.assertTrue(common_functions_method.called)
-    self.assertTrue(extra_functions_method.called)
-    self.assertEqual(common_functions_method.call_count, 1)
-    self.assertEqual(extra_functions_method.call_count, 1)
-    self.assertTrue(group_functions_method.called)
-    self.assertEqual(group_functions_method.call_count, 1)
-
-  def testParseCWPStatistics(self):
-    cwp_statistics = {'dummy_method1,dummy_file1': ('dummy_object1,1', 0),
-                      'dummy_method2,dummy_file2': ('dummy_object2,2', 0),
-                      'dummy_method3,dummy_file3': ('dummy_object3,3', 0),
-                      'dummy_method4,dummy_file4': ('dummy_object4,4', 0)}
-    hot_functions_processor = self._CreateHotFunctionsProcessor(
-        self._extra_cwp_functions_file)
-    result = hot_functions_processor.ParseCWPStatistics(
-        self._cwp_functions_file_parsing)
-
-    self.assertDictEqual(result, cwp_statistics)
-
-  def testExtractCommonFunctions(self):
-    hot_functions_processor = self._CreateHotFunctionsProcessor(
-        self._extra_cwp_functions_file)
-    common_functions_path = tempfile.mkdtemp()
-    hot_functions_processor.ExtractCommonFunctions(self._pprof_path,
-                                                   common_functions_path,
-                                                   self._cwp_functions_file)
-    expected_files = \
-      [os.path.join(self._expected_common_functions_path, expected_file)
-       for expected_file in os.listdir(self._expected_common_functions_path)]
-    result_files = \
-      [os.path.join(common_functions_path, result_file)
-       for result_file in os.listdir(common_functions_path)]
-
-    expected_files.sort()
-    result_files.sort()
-
-    for expected_file_name, result_file_name in \
-      zip(expected_files, result_files):
-      with open(expected_file_name) as expected_file:
-        expected_output_lines = expected_file.readlines()
-        self.checkFileContents(result_file_name, expected_output_lines)
-    shutil.rmtree(common_functions_path)
-
-  def testExtractExtraFunctions(self):
-    cwp_statistics = {'dummy_method1,dummy_file1': ('dummy_object1,1', 0),
-                      'dummy_method2,dummy_file2': ('dummy_object2,2', 1),
-                      'dummy_method3,dummy_file3': ('dummy_object3,3', 1),
-                      'dummy_method4,dummy_file4': ('dummy_object4,4', 0)}
-    expected_output_lines = ['function,file,dso,inclusive_count\n',
-                             'dummy_method4,dummy_file4,dummy_object4,4\n',
-                             'dummy_method1,dummy_file1,dummy_object1,1']
-    temp_file, temp_filename = tempfile.mkstemp()
-    os.close(temp_file)
-    hot_functions_processor = self._CreateHotFunctionsProcessor(temp_filename)
-
-    hot_functions_processor.ExtractExtraFunctions(cwp_statistics, temp_filename)
-    self.checkFileContents(temp_filename, expected_output_lines)
-    os.remove(temp_filename)
-
-  def testParseFunctionGroups(self):
-    cwp_function_groups_lines = ['group1 /a\n', 'group2 /b\n', 'group3 /c\n',
-                                 'group4 /d\n']
-    expected_output = [('group1', '/a', 0, []), ('group2', '/b', 0, []),
-                       ('group3', '/c', 0, []), ('group4', '/d', 0, [])]
-    result = HotFunctionsProcessor.ParseFunctionGroups(
-        cwp_function_groups_lines)
-    self.assertListEqual(expected_output, result)
-
-  def testGroupExtraFunctions(self):
-    cwp_statistics = {'dummy_method1,/a/b': ('dummy_object1,1', 1),
-                      'dummy_method2,/c/d': ('dummy_object2,2', 0),
-                      'dummy_method3,/a/b': ('dummy_object3,3', 0),
-                      'dummy_method4,/c/d': ('dummy_object4,4', 1),
-                      'dummy_method5,/a/b': ('dummy_object5,5', 0),
-                      'dummy_method6,/e': ('dummy_object6,6', 0),
-                      'dummy_method7,/c/d': ('dummy_object7,7', 0),
-                      'dummy_method8,/e': ('dummy_object8,8', 0)}
-    cwp_groups_statistics_file, \
-        cwp_groups_statistics_filename = tempfile.mkstemp()
-
-    os.close(cwp_groups_statistics_file)
-
-    cwp_groups_file_path = tempfile.mkdtemp()
-    cwp_groups_file_prefix = os.path.join(cwp_groups_file_path, 'dummy')
-    hot_functions_processor = self._CreateHotFunctionsProcessor(
-        self._extra_cwp_functions_file)
-
-    hot_functions_processor.GroupExtraFunctions(cwp_statistics,
-                                                cwp_groups_file_prefix,
-                                                self._cwp_function_groups_file,
-                                                cwp_groups_statistics_filename)
-
-    expected_group_ab_lines = ['function,file,dso,inclusive_count\n',
-                               'dummy_method5,/a/b,dummy_object5,5\n',
-                               'dummy_method3,/a/b,dummy_object3,3']
-    expected_group_cd_lines = ['function,file,dso,inclusive_count\n',
-                               'dummy_method7,/c/d,dummy_object7,7\n',
-                               'dummy_method2,/c/d,dummy_object2,2']
-    expected_group_e_lines = ['function,file,dso,inclusive_count\n',
-                              'dummy_method8,/e,dummy_object8,8\n',
-                              'dummy_method6,/e,dummy_object6,6']
-    expected_group_statistics_lines = ['group,shared_path,inclusive_count\n',
-                                       'e,/e,14\n', 'cd,/c/d,9\n', 'ab,/a/b,8']
-
-    self.checkFileContents('%sab' % (cwp_groups_file_prefix,),
-                           expected_group_ab_lines)
-    self.checkFileContents('%scd' % (cwp_groups_file_prefix,),
-                           expected_group_cd_lines)
-    self.checkFileContents('%se' % (cwp_groups_file_prefix,),
-                           expected_group_e_lines)
-    self.checkFileContents(cwp_groups_statistics_filename,
-                           expected_group_statistics_lines)
-
-    shutil.rmtree(cwp_groups_file_path)
-    os.remove(cwp_groups_statistics_filename)
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/user_activity_benchmarks/select_hot_functions.sql b/user_activity_benchmarks/select_hot_functions.sql
deleted file mode 100644
index d121d61..0000000
--- a/user_activity_benchmarks/select_hot_functions.sql
+++ /dev/null
@@ -1,27 +0,0 @@
--- Collects the function, with its file, the object and inclusive count value.
--- The limits here are entirely arbitrary.
--- For more background, look at
--- https://sites.google.com/a/google.com/cwp/about/callgraphs.
-SELECT
-  frame.function_name AS function,
-  frame.filename AS file,
-  frame.load_module_path AS dso,
-  sum(frame.inclusive_count) AS inclusive_count
-FROM
-  -- Collect the data stored in CWP over the last 30 days.
-  FLATTEN(chromeos_wide_profiling.sampledb.cycles.callgraph.last30days, frame)
-WHERE
-  meta.cros.report_id % UINT64("1") == 0
-  -- The reports were collected periodically.
-  AND meta.cros.collection_info.trigger_event == 1
-  AND `profile.duration_usec` < 2100000
-  -- The reports were from a busy machine.
-  AND session.total_count > 2000
-  --  The reports are from the gnawty board, x86_64 architecture.
-  AND meta.cros.board == "gnawty"
-  AND meta.cros.cpu_architecture == "x86_64"
-  -- The reports include callchain data.
-  AND left(meta.cros.version, 4) > "6970"
-  GROUP BY function, dso, file
-ORDER BY `inclusive_count` DESC
-LIMIT 50000 ;
diff --git a/user_activity_benchmarks/select_optimal_benchmark_set.py b/user_activity_benchmarks/select_optimal_benchmark_set.py
deleted file mode 100755
index 1c8305c..0000000
--- a/user_activity_benchmarks/select_optimal_benchmark_set.py
+++ /dev/null
@@ -1,347 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Selects the optimal set of benchmarks.
-
-For each benchmark, there is a file with the common functions, as extracted by
-the process_hot_functions module.
-
-The script receives as input the CSV file with the CWP inclusive count values,
-the file with Chrome OS groups and the path containing a file with common
-functions for every benchmark.
-
-It extracts for every benchmark and for the CWP data all the functions that
-match the given Chrome OS groups.
-
-It generates all possible combinations of benchmark sets of a given size and
-it computes for every set a metric.
-It outputs the optimal sets, based on which ones have the best metric.
-
-Three different metrics have been used: function count, distance
-variation and score.
-
-For the function count metric, we count the unique functions covered by a
-set of benchmarks. Besides the number of unique functions, we also compute
-the fraction of unique functions out of the total number of CWP functions
-from the given groups. The benchmark set with the highest number of unique
-functions that belong to all the given groups is considered better.
-
-For the distance variation metric, we compute the sum of the distance
-variations of the functions covered by a set of benchmarks. We define the
-distance variation as the difference between the distance value of a function
-and the ideal distance value (1.0). If a function appears in multiple common
-functions files, we consider only the minimum value. We also compute the
-distance variation per function. The set with the smallest distance variation
-per function is considered better.
-
-For the score metric, we compute the sum of the scores of the functions from a
-set of benchmarks. If a function appears in multiple common functions files,
-we consider only the maximum value. We also compute the fraction of this sum
-out of the ideal sum of the scores of the CWP functions covering the given
-groups (the ideal score of a function is 1.0).
-
-We compute the metrics in the same manner for individual Chrome OS groups.
-"""
-
-from collections import defaultdict
-
-import argparse
-import csv
-import itertools
-import json
-import operator
-import os
-import sys
-
-import benchmark_metrics
-import utils
-
-
-class BenchmarkSet(object):
-  """Selects the optimal set of benchmarks of given size."""
-
-  # Constants that specify the metric type.
-  FUNCTION_COUNT_METRIC = 'function_count'
-  DISTANCE_METRIC = 'distance_variation'
-  SCORE_METRIC = 'score_fraction'
-
-  def __init__(self, benchmark_set_size, benchmark_set_output_file,
-               benchmark_set_common_functions_path, cwp_inclusive_count_file,
-               cwp_function_groups_file, metric):
-    """Initializes the BenchmarkSet.
-
-    Args:
-      benchmark_set_size: Constant representing the size of a benchmark set.
-      benchmark_set_output_file: The output file that will contain the set of
-        optimal benchmarks with the metric values.
-      benchmark_set_common_functions_path: The directory containing the files
-        with the common functions for the list of benchmarks.
-      cwp_inclusive_count_file: The CSV file containing the CWP functions with
-        their inclusive count values.
-      cwp_function_groups_file: The file that contains the CWP function groups.
-      metric: The type of metric used for the analysis.
-    """
-    self._benchmark_set_size = int(benchmark_set_size)
-    self._benchmark_set_output_file = benchmark_set_output_file
-    self._benchmark_set_common_functions_path = \
-        benchmark_set_common_functions_path
-    self._cwp_inclusive_count_file = cwp_inclusive_count_file
-    self._cwp_function_groups_file = cwp_function_groups_file
-    self._metric = metric
-
-  @staticmethod
-  def OrganizeCWPFunctionsInGroups(cwp_inclusive_count_statistics,
-                                   cwp_function_groups):
-    """Selects the CWP functions that match the given Chrome OS groups.
-
-    Args:
-      cwp_inclusive_count_statistics: A dict with the CWP functions.
-      cwp_function_groups: A list with the CWP function groups.
-
-    Returns:
-      A dict having as a key the name of a group and as a value the list of
-      CWP functions that match that group.
-    """
-    cwp_functions_grouped = defaultdict(list)
-    for function_key in cwp_inclusive_count_statistics:
-      _, file_name = function_key.split(',')
-      for group_name, file_path in cwp_function_groups:
-        if file_path not in file_name:
-          continue
-        cwp_functions_grouped[group_name].append(function_key)
-        break
-    return cwp_functions_grouped
-
-  @staticmethod
-  def OrganizeBenchmarkSetFunctionsInGroups(benchmark_set_files,
-                                            benchmark_set_common_functions_path,
-                                            cwp_function_groups):
-    """Selects the benchmark functions that match the given Chrome OS groups.
-
-    Args:
-      benchmark_set_files: The list of common functions files corresponding to a
-        benchmark.
-      benchmark_set_common_functions_path: The directory containing the files
-        with the common functions for the list of benchmarks.
-      cwp_function_groups: A list with the CWP function groups.
-
-    Returns:
-      A dict having as a key the name of a common functions file. The value is
-      a dict having as a key the name of a group and as value a list of
-      functions that match the given group.
-    """
-
-    benchmark_set_functions_grouped = {}
-    for benchmark_file_name in benchmark_set_files:
-      benchmark_full_file_path = \
-          os.path.join(benchmark_set_common_functions_path,
-                       benchmark_file_name)
-      with open(benchmark_full_file_path) as input_file:
-        statistics_reader = \
-            csv.DictReader(input_file, delimiter=',')
-        benchmark_functions_grouped = defaultdict(dict)
-        for statistic in statistics_reader:
-          function_name = statistic['function']
-          file_name = statistic['file']
-          for group_name, file_path in cwp_function_groups:
-            if file_path not in file_name:
-              continue
-            function_key = ','.join([function_name, file_name])
-            distance = float(statistic['distance'])
-            score = float(statistic['score'])
-            benchmark_functions_grouped[group_name][function_key] = \
-                (distance, score)
-            break
-          benchmark_set_functions_grouped[benchmark_file_name] = \
-              benchmark_functions_grouped
-    return benchmark_set_functions_grouped
-
-  @staticmethod
-  def SelectOptimalBenchmarkSetBasedOnMetric(all_benchmark_combinations_sets,
-                                             benchmark_set_functions_grouped,
-                                             cwp_functions_grouped,
-                                             metric_function_for_set,
-                                             metric_comparison_operator,
-                                             metric_default_value,
-                                             metric_string):
-    """Generic method that selects the optimal benchmark set based on a metric.
-
-    The reason for implementing a generic function is to avoid duplicating the
-    logic for selecting a benchmark set based on the three different metrics.
-
-    Args:
-      all_benchmark_combinations_sets: The list with all the sets of benchmark
-        combinations.
-      benchmark_set_functions_grouped: A dict with benchmark functions as
-        returned by OrganizeBenchmarkSetFunctionsInGroups.
-      cwp_functions_grouped: A dict with the CWP functions as returned by
-        OrganizeCWPFunctionsInGroups.
-      metric_function_for_set: The method used to compute the metric for a given
-        benchmark set.
-      metric_comparison_operator: A comparison operator used to compare two
-        values of the same metric (i.e. operator.lt or operator.gt).
-      metric_default_value: The default value for the metric.
-      metric_string: A tuple of strings used in the JSON output for the pair of
-        the values of the metric.
-
-    Returns:
-      A list of tuples, one for each optimal benchmark set. Each tuple
-      contains the list of benchmarks from the set, the pair of metric values
-      and a dictionary with the metrics for each group.
-    """
-    optimal_sets = [([], metric_default_value, {})]
-
-    for benchmark_combination_set in all_benchmark_combinations_sets:
-      function_metrics = [benchmark_set_functions_grouped[benchmark]
-                          for benchmark in benchmark_combination_set]
-      set_metrics, set_groups_metrics = \
-          metric_function_for_set(function_metrics, cwp_functions_grouped,
-                                  metric_string)
-      optimal_value = optimal_sets[0][1][0]
-      if metric_comparison_operator(set_metrics[0], optimal_value):
-        optimal_sets = \
-            [(benchmark_combination_set, set_metrics, set_groups_metrics)]
-      elif set_metrics[0] == optimal_sets[0][1][0]:
-        optimal_sets.append(
-            (benchmark_combination_set, set_metrics, set_groups_metrics))
-
-    return optimal_sets
-
-  def SelectOptimalBenchmarkSet(self):
-    """Selects the optimal benchmark sets and writes them in JSON format.
-
-    Parses the CWP inclusive count statistics and benchmark common functions
-    files. Organizes the functions into groups. For every optimal benchmark
-    set, the method writes to self._benchmark_set_output_file the list of
-    benchmarks, the pair of metrics and a dictionary with the pair of
-    metrics for each group covered by the benchmark set.
-    """
-
-    benchmark_set_files = os.listdir(self._benchmark_set_common_functions_path)
-    all_benchmark_combinations_sets = \
-        itertools.combinations(benchmark_set_files, self._benchmark_set_size)
-
-    with open(self._cwp_function_groups_file) as input_file:
-      cwp_function_groups = utils.ParseFunctionGroups(input_file.readlines())
-
-    cwp_inclusive_count_statistics = \
-        utils.ParseCWPInclusiveCountFile(self._cwp_inclusive_count_file)
-    cwp_functions_grouped = self.OrganizeCWPFunctionsInGroups(
-        cwp_inclusive_count_statistics, cwp_function_groups)
-    benchmark_set_functions_grouped = \
-        self.OrganizeBenchmarkSetFunctionsInGroups(
-            benchmark_set_files, self._benchmark_set_common_functions_path,
-            cwp_function_groups)
-
-    if self._metric == self.FUNCTION_COUNT_METRIC:
-      metric_function_for_benchmark_set = \
-          benchmark_metrics.ComputeFunctionCountForBenchmarkSet
-      metric_comparison_operator = operator.gt
-      metric_default_value = (0, 0.0)
-      metric_string = ('function_count', 'function_count_fraction')
-    elif self._metric == self.DISTANCE_METRIC:
-      metric_function_for_benchmark_set = \
-          benchmark_metrics.ComputeDistanceForBenchmarkSet
-      metric_comparison_operator = operator.lt
-      metric_default_value = (float('inf'), float('inf'))
-      metric_string = \
-          ('distance_variation_per_function', 'total_distance_variation')
-    elif self._metric == self.SCORE_METRIC:
-      metric_function_for_benchmark_set = \
-          benchmark_metrics.ComputeScoreForBenchmarkSet
-      metric_comparison_operator = operator.gt
-      metric_default_value = (0.0, 0.0)
-      metric_string = ('score_fraction', 'total_score')
-    else:
-      raise ValueError('Invalid metric: %s' % self._metric)
-
-    optimal_benchmark_sets = \
-        self.SelectOptimalBenchmarkSetBasedOnMetric(
-            all_benchmark_combinations_sets, benchmark_set_functions_grouped,
-            cwp_functions_grouped, metric_function_for_benchmark_set,
-            metric_comparison_operator, metric_default_value, metric_string)
-
-    json_output = []
-
-    for benchmark_set in optimal_benchmark_sets:
-      json_entry = {
-          'benchmark_set':
-              list(benchmark_set[0]),
-          'metrics': {
-              metric_string[0]: benchmark_set[1][0],
-              metric_string[1]: benchmark_set[1][1]
-          },
-          'groups':
-              dict(benchmark_set[2])
-      }
-      json_output.append(json_entry)
-
-    with open(self._benchmark_set_output_file, 'w') as output_file:
-      json.dump(json_output, output_file)
-
-
-def ParseArguments(arguments):
-  parser = argparse.ArgumentParser()
-
-  parser.add_argument(
-      '--benchmark_set_common_functions_path',
-      required=True,
-      help='The directory containing the CSV files with the common functions '
-      'of the benchmark profiles and CWP data. A file will contain all the hot '
-      'functions from a pprof top output file that are also included in the '
-      'file containing the CWP inclusive count values. The CSV fields are: the '
-      'function name, the file and the object where the function is declared, '
-      'the CWP inclusive count and inclusive count fraction values, the '
-      'cumulative and average distance, the cumulative and average score. The '
-      'files with the common functions will have the same names as the '
-      'corresponding pprof output files.')
-  parser.add_argument(
-      '--cwp_inclusive_count_file',
-      required=True,
-      help='The CSV file containing the CWP hot functions with their '
-      'inclusive_count values. The CSV fields include the name of the '
-      'function, the file and the object with the definition, the inclusive '
-      'count value and the inclusive count fraction out of the total amount of '
-      'inclusive count values.')
-  parser.add_argument(
-      '--benchmark_set_size',
-      required=True,
-      help='The size of the benchmark sets.')
-  parser.add_argument(
-      '--benchmark_set_output_file',
-      required=True,
-      help='The JSON output file containing optimal benchmark sets with their '
-      'metrics. For every optimal benchmark set, the file contains the list of '
-      'benchmarks, the pair of metrics and a dictionary with the pair of '
-      'metrics for each group covered by the benchmark set.')
-  parser.add_argument(
-      '--metric',
-      required=True,
-      help='The metric used to select the optimal benchmark set. The possible '
-      'values are: distance_variation, function_count and score_fraction.')
-  parser.add_argument(
-      '--cwp_function_groups_file',
-      required=True,
-      help='The file that contains the CWP function groups. A line consists of '
-      'the group name and a file path describing the group. A group must '
-      'represent a Chrome OS component.')
-
-  options = parser.parse_args(arguments)
-
-  return options
-
-
-def Main(argv):
-  options = ParseArguments(argv)
-  benchmark_set = BenchmarkSet(options.benchmark_set_size,
-                               options.benchmark_set_output_file,
-                               options.benchmark_set_common_functions_path,
-                               options.cwp_inclusive_count_file,
-                               options.cwp_function_groups_file, options.metric)
-  benchmark_set.SelectOptimalBenchmarkSet()
-
-
-if __name__ == '__main__':
-  Main(sys.argv[1:])
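-
-
-# Example invocation (illustrative only; the file and directory names below
-# are placeholders):
-#   ./select_optimal_benchmark_set.py \
-#     --benchmark_set_common_functions_path=common_functions/ \
-#     --cwp_inclusive_count_file=cwp_inclusive_count.csv \
-#     --cwp_function_groups_file=cwp_function_groups.txt \
-#     --benchmark_set_size=3 \
-#     --metric=score_fraction \
-#     --benchmark_set_output_file=optimal_sets.json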
diff --git a/user_activity_benchmarks/symbolize_profiles.sh b/user_activity_benchmarks/symbolize_profiles.sh
deleted file mode 100755
index 904cc1b..0000000
--- a/user_activity_benchmarks/symbolize_profiles.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Uses local_cwp to do the profile symbolization.
-# The profiles that need to be symbolized are placed in the profiles_path.
-# The results are placed in the local_cwp_results_path.
-
-set -e
-
-if [ "$#" -ne 3 ]; then
-  echo "USAGE: symbolize_profiles.sh profiles_path local_cwp_binary_path " \
-    "local_cwp_results_path"
-  exit 1
-fi
-
-readonly PROFILES_PATH=$1
-readonly LOCAL_CWP_BINARY_PATH=$2
-readonly LOCAL_CWP_RESULTS_PATH=$3
-readonly PROFILES=($(ls "$PROFILES_PATH"))
-
-for profile in "${PROFILES[@]}"
-do
-  if ! $LOCAL_CWP_BINARY_PATH \
-    --output="$LOCAL_CWP_RESULTS_PATH/${profile}.pb.gz" \
-    "$PROFILES_PATH/$profile"; then
-    echo "Failed to symbolize the perf profile output with local_cwp for " \
-      "$profile."
-    continue
-  fi
-done
diff --git a/user_activity_benchmarks/telemetry_benchmarks_R52_8350.68 b/user_activity_benchmarks/telemetry_benchmarks_R52_8350.68
deleted file mode 100644
index 0177dab..0000000
--- a/user_activity_benchmarks/telemetry_benchmarks_R52_8350.68
+++ /dev/null
@@ -1,113 +0,0 @@
-blink_perf.bindings
-blink_perf.canvas
-blink_perf.css
-blink_perf.dom
-blink_perf.events
-blink_perf.layout
-blink_perf.paint
-blink_perf.parser
-blink_perf.shadow_dom
-blink_perf.svg
-blink_style.top_25
-blob_storage.blob_storage
-dromaeo.cssqueryjquery
-dromaeo.domcoreattr
-dromaeo.domcoremodify
-dromaeo.domcorequery
-dromaeo.domcoretraverse
-dromaeo.jslibattrjquery
-dromaeo.jslibattrprototype
-dromaeo.jslibeventjquery
-dromaeo.jslibeventprototype
-dromaeo.jslibmodifyjquery
-dromaeo.jslibmodifyprototype
-dromaeo.jslibstylejquery
-dromaeo.jslibstyleprototype
-dromaeo.jslibtraversejquery
-dromaeo.jslibtraverseprototype
-dummy_benchmark.noisy_benchmark_1
-dummy_benchmark.stable_benchmark_1
-image_decoding.image_decoding_measurement
-indexeddb_perf
-jetstream
-jitter
-kraken
-media.chromeOS4kOnly.tough_video_cases
-media.chromeOS.tough_video_cases
-media.media_cns_cases
-media.mse_cases
-media.tough_video_cases_extra
-media.tough_video_cases
-memory.long_running_idle_gmail_background_tbmv2
-memory.long_running_idle_gmail_tbmv2
-memory.top_7_stress
-octane
-oilpan_gc_times.tough_animation_cases
-oortonline
-page_cycler.basic_oopif
-page_cycler.intl_hi_ru
-page_cycler.intl_ko_th_vi
-page_cycler_site_isolation.basic_oopif
-page_cycler.typical_25
-page_cycler_v2.basic_oopif
-page_cycler_v2.intl_ar_fa_he
-page_cycler_v2.intl_es_fr_pt-BR
-page_cycler_v2.intl_hi_ru
-page_cycler_v2.intl_ja_zh
-page_cycler_v2.intl_ko_th_vi
-page_cycler_v2_site_isolation.basic_oopif
-page_cycler_v2.top_10_mobile
-page_cycler_v2.typical_25
-rasterize_and_record_micro.key_mobile_sites_smooth
-rasterize_and_record_micro.key_silk_cases
-rasterize_and_record_micro.top_25_smooth
-robohornet_pro
-scheduler.tough_scheduling_cases
-service_worker.service_worker_micro_benchmark
-service_worker.service_worker
-smoothness.gpu_rasterization_and_decoding.image_decoding_cases
-smoothness.gpu_rasterization.tough_filters_cases
-smoothness.gpu_rasterization.tough_path_rendering_cases
-smoothness.image_decoding_cases
-smoothness.key_desktop_move_cases
-smoothness.scrolling_tough_ad_case
-smoothness.scrolling_tough_ad_cases
-smoothness.top_25_smooth
-smoothness.tough_ad_cases
-spaceport
-speedometer-ignition
-speedometer
-startup.cold.blank_page
-startup.large_profile.cold.blank_page
-startup.large_profile.warm.blank_page
-startup.large_profile.warm.blank
-startup.warm.blank_page
-start_with_ext.cold.blank_page
-start_with_ext.warm.blank_page
-storage.indexeddb_endure
-storage.indexeddb_endure_tracing
-sunspider
-system_health.memory_mobile
-tab_switching.five_blank_pages
-tab_switching.top_10
-tab_switching.tough_energy_cases
-tab_switching.tough_image_cases
-tab_switching.typical_25
-thread_times.tough_compositor_cases
-thread_times.tough_scrolling_cases
-top_10_mobile_memory_ignition
-top_10_mobile_memory
-tracing.tracing_with_background_memory_infra
-tracing.tracing_with_debug_overhead
-v8.browsing_mobile
-v8.detached_context_age_in_gc
-v8.google
-v8.infinite_scroll-ignition_tbmv2
-v8.infinite_scroll_tbmv2
-v8.todomvc-ignition
-v8.todomvc
-v8.top_25_smooth
-webrtc.datachannel
-webrtc.getusermedia
-webrtc.peerconnection
-webrtc.webrtc_smoothness
diff --git a/user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof b/user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof
deleted file mode 100644
index 30d4c83..0000000
--- a/user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof
+++ /dev/null
@@ -1,3 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,debug/opt/google/chrome/chrome,30638548,3007599556,0.81%,42.16%,13057167098,3.51%
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,/opt/google/chrome/chrome,21484525,2725201614,0.73%,45.17%,3511333688,0.94%
\ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof b/user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof
deleted file mode 100644
index bef9266..0000000
--- a/user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof
+++ /dev/null
@@ -1,2 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::InvalidationSet::invalidatesElement,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/InvalidationSet.cpp,debug/opt/google/chrome/chrome,42293369,4585860529,3.95%,3.95%,13583834527,11.70%
\ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof b/user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof
deleted file mode 100644
index 7bac48e..0000000
--- a/user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof
+++ /dev/null
@@ -1,4 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-SkPackARGB32,/home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,/opt/google/chrome/chrome,15535764,1628614163,1.64%,27.31%,1633246854,1.64%
-MOZ_Z_adler32,/home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c,/opt/google/chrome/chrome,17825054,1455734663,1.46%,31.79%,1456692596,1.46%
-unpack_ubyte_b8g8r8a8_unorm,/build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,19183960,1137455802,1.14%,34.21%,1150209506,1.16%
\ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/input/cwp_function_groups.txt b/user_activity_benchmarks/testdata/input/cwp_function_groups.txt
deleted file mode 100644
index 4233d03..0000000
--- a/user_activity_benchmarks/testdata/input/cwp_function_groups.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-ab /a/b
-cd /c/d
-e /e
diff --git a/user_activity_benchmarks/testdata/input/cwp_functions_file.csv b/user_activity_benchmarks/testdata/input/cwp_functions_file.csv
deleted file mode 100644
index 6c5ed58..0000000
--- a/user_activity_benchmarks/testdata/input/cwp_functions_file.csv
+++ /dev/null
@@ -1,38 +0,0 @@
-function,file,dso,inclusive_count
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,debug/opt/google/chrome/chrome,45766441
-blink::InvalidationSet::invalidatesElement,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/InvalidationSet.cpp,debug/opt/google/chrome/chrome,42293369
-base::MessageLoop::Run,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,debug/opt/google/chrome/chrome,41135127
-blink::StyleInvalidator::RecursionCheckpoint::RecursionCheckpoint,debug/opt/google/chrome/chrome,38403286
-base::MessageLoop::RunTask,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,debug/opt/google/chrome/chrome,38397557
-base::debug::TaskAnnotator::RunTask,/home/chrome-bot/chrome_root/src/base/debug/task_annotator.cc,debug/opt/google/chrome/chrome,38322520
-WTF::HashTableConstIterator::skipEmptyBuckets,debug/opt/google/chrome/chrome,34950293
-unpack_ubyte_b8g8r8a8_unorm /build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,34486616
-base::internal::RunnableAdapter::Run,/home/chrome-bot/chrome_root/src/base/bind_internal.h,debug/opt/google/chrome/chrome,34281237
-blink::Element::hasID /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/dom/Element.h,debug/opt/google/chrome/chrome,34237955
-blink::ElementV8Internal::idAttributeGetterCallback,debug/opt/google/chrome/chrome,32481250
-_start,,debug/opt/google/chrome/chrome,32451253
-__libc_start_main,/var/tmp/portage/cross-x86_64-cros-linux-gnu/glibc-2.19-r9/work/glibc-2.19/csu/libc-start.c,debug/lib64/libc-2.19.so,32124944
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,debug/opt/google/chrome/chrome,30638548
-sha_transform /mnt/host/source/src/third_party/kernel/v3.10/lib/sha1.c,debug/opt/google/chrome/chrome,30615551
-ChromeMain,/home/chrome-bot/chrome_root/src/chrome/app/chrome_main.cc,debug/opt/google/chrome/chrome,30595408
-__clone,sysdeps/unix/sysv/linux/x86_64/clone.S,debug/lib64/libc-2.19.so,25480585
-start_thread,/var/tmp/portage/cross-x86_64-cros-linux-gnu/glibc-2.19-r9/work/glibc-2.19/nptl/pthread_create.c,debug/lib64/libpthread-2.19.so,24504351
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,/opt/google/chrome/chrome,21484525
-base::(anonymous namespace)::ThreadFunc,/home/chrome-bot/chrome_root/src/base/threading/platform_thread_posix.cc,debug/opt/google/chrome/chrome,20700177
-base::Callback::Run,/home/chrome-bot/chrome_root/src/base/callback.h,/opt/google/chrome/chrome,20455633
-,,//anon,20220979
-SkSwizzle_RB /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,debug/opt/google/chrome/chrome,19673187
-base::MessageLoop::Run,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,/opt/google/chrome/chrome,19247788
-scheduler::TaskQueueManager::DoWork,/home/chrome-bot/chrome_root/src/components/scheduler/base/task_queue_manager.cc,debug/opt/google/chrome/chrome,19207528
-unpack_ubyte_b8g8r8a8_unorm,/build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,19183960
-scheduler::TaskQueueManager::ProcessTaskFromWorkQueue,/home/chrome-bot/chrome_root/src/components/scheduler/base/task_queue_manager.cc,debug/opt/google/chrome/chrome,18975400
-base::MessageLoop::DeferOrRunPendingTask,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,/opt/google/chrome/chrome,17864182
-,[anon],100011
-blink::DocumentV8Internal::getElementByIdMethodCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Document.cpp,/opt/google/chrome/chrome,17862466
-MOZ_Z_adler32,/home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c,/opt/google/chrome/chrome,17825054
-base::internal::Invoker::Run,/home/chrome-bot/chrome_root/src/base/bind_internal.h,/opt/google/chrome/chrome,16438965
-base::MessageLoop::DoWork,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,/opt/google/chrome/chrome,16029394
-base::internal::InvokeHelper::MakeItSo,/home/chrome-bot/chrome_root/src/base/bind_internal.h,/opt/google/chrome/chrome,15569953
-SkPackARGB32,/home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,/opt/google/chrome/chrome,15535764
-base::Thread::ThreadMain,/home/chrome-bot/chrome_root/src/base/threading/thread.cc,debug/opt/google/chrome/chrome,15094458
-_start,,/opt/google/chrome/chrome,15014598
diff --git a/user_activity_benchmarks/testdata/input/inclusive_count_reference.csv b/user_activity_benchmarks/testdata/input/inclusive_count_reference.csv
deleted file mode 100644
index bc0cca6..0000000
--- a/user_activity_benchmarks/testdata/input/inclusive_count_reference.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-function,file,dso,inclusive_count,inclusive_count_fraction
-func_f,/a/b/file_f,f,1,1
-func_g,/a/b/file_g,g,2,2
-func_g,/a/b/../../a/b/file_g,g,3,2.4
-func_h,/c/d/file_h,h,4,3
-func_i,/c/d/file_i,i,5,4
-func_j,/e/file_j,j,6,5
-func_l,/e/file_l,l,7,6
diff --git a/user_activity_benchmarks/testdata/input/inclusive_count_test.csv b/user_activity_benchmarks/testdata/input/inclusive_count_test.csv
deleted file mode 100644
index c993827..0000000
--- a/user_activity_benchmarks/testdata/input/inclusive_count_test.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-function,file,dso,inclusive_count,inclusive_count_fraction
-func_f,/a/b/file_f,f,1,1.1
-func_g,/a/b/file_g,g,2,2.2
-func_f,/a/b/file_f,f,3,1.2
-func_h,/c/d/../../c/d/file_h,h,1,3.3
-func_i,/c/d/file_i,i,5,4.4
-func_j,/e/file_j,j,6,5.5
-func_k,/e/file_k,k,7,6.6
diff --git a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv b/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv
deleted file mode 100644
index 7d7a49a..0000000
--- a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-parent_child_functions,child_function_file,inclusive_count
-func_f;;func_g,/a/../a/b/file_g,0.1
-func_f;;func_h,/c/d/../d/file_h,0.2
-func_f;;func_i,/c/d/file_i,0.3
-func_g;;func_j,/e/file_j,0.4
diff --git a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv b/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv
deleted file mode 100644
index a3fb72f..0000000
--- a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv
+++ /dev/null
@@ -1,6 +0,0 @@
-parent_child_functions,child_function_file,inclusive_count
-func_f;;func_g,/a/b/file_g2,0.01
-func_f;;func_h,/c/../c/d/file_h,0.02
-func_f;;func_i,/c/../c/d/file_i,0.03
-func_g;;func_j,/e/file_j,0.4
-func_g;;func_m,/e/file_m,0.6
diff --git a/user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv b/user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv
deleted file mode 100644
index a4c7ced..0000000
--- a/user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv
+++ /dev/null
@@ -1,6 +0,0 @@
-function,file,dso,inclusive_count
-dummy_method1,dummy_file1/a/b/../../,dummy_object1,1
-dummy_method2,dummy_file2//,dummy_object2,2
-,,321223321,1
-dummy_method3,dummy_file3/a/../,dummy_object3,3
-dummy_method4,dummy_file4/./,dummy_object4,4
diff --git a/user_activity_benchmarks/testdata/input/pprof_top/file1.pprof b/user_activity_benchmarks/testdata/input/pprof_top/file1.pprof
deleted file mode 100644
index 62e327b..0000000
--- a/user_activity_benchmarks/testdata/input/pprof_top/file1.pprof
+++ /dev/null
@@ -1,20 +0,0 @@
-File: perf
-Build ID: 1000000000
-Type: instructions_event
-Showing nodes accounting for 239632475284, 64.41% of 372058624378 total
-Dropped 33979 nodes (cum <= 1860293121)
-      flat  flat%   sum%        cum   cum%
-      115734836217 31.11% 31.11% 329503350629 88.56%  [anon]
-      9839378797  2.64% 33.75% 14384869492  3.87%  blink::v8StringToWebCoreString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp
-      6054608957  1.63% 35.38% 8069380147  2.17%  v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-      4651723038  1.25% 36.63% 8205985387  2.21%  blink::ElementV8Internal::idAttributeGetterCallback /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp
-      4569044106  1.23% 37.86% 6408862507  1.72%  blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp
-      3354819815   0.9% 38.76% 3361796139   0.9%  v8::internal::Internals::ReadField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-      3277220829  0.88% 39.64% 14077115947  3.78%  blink::DocumentV8Internal::getElementByIdMethodCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Document.cpp
-      3225711531  0.87% 40.51% 3228415743  0.87%  v8::internal::Internals::HasHeapObjectTag /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-      3139339048  0.84% 41.35% 3144663928  0.85%  v8::internal::Bitmap::MarkBitFromIndex /home/chrome-bot/chrome_root/src/v8/src/heap/spaces.h (inline)
-      3007599556  0.81% 42.16% 13057167098  3.51%  blink::ElementV8Internal::getAttributeMethodCallback /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp
-      2907238921  0.78% 42.94% 2930031660  0.79%  v8::base::NoBarrier_Load /home/chrome-bot/chrome_root/src/v8/src/base/atomicops_internals_x86_gcc.h (inline)
-      2791274646  0.75% 43.69% 11058283504  2.97%  v8::internal::MarkCompactMarkingVisitor::VisitUnmarkedObjects /home/chrome-bot/chrome_root/src/v8/src/heap/mark-compact.cc (inline)
-      2786321388  0.75% 44.44% 2794002850  0.75%  WTF::hashInt /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/HashFunctions.h (inline)
-      2725201614  0.73% 45.17% 3511333688  0.94%  base::RunLoop::Run /home/chrome-bot/chrome_root/src/base/run_loop.cc
diff --git a/user_activity_benchmarks/testdata/input/pprof_top/file2.pprof b/user_activity_benchmarks/testdata/input/pprof_top/file2.pprof
deleted file mode 100644
index 6d22bff..0000000
--- a/user_activity_benchmarks/testdata/input/pprof_top/file2.pprof
+++ /dev/null
@@ -1,17 +0,0 @@
-File: perf
-Build ID: 1000000000
-Type: instructions_event
-Showing nodes accounting for 48939666671, 42.14% of 116136877744 total
-Dropped 35196 nodes (cum <= 580684388)
-      flat  flat%   sum%        cum   cum%
-      4585860529  3.95%  3.95% 13583834527 11.70%  blink::InvalidationSet::invalidatesElement /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/a/b/../../InvalidationSet.cpp
-      3791928512  3.27%  7.21% 35145646088 30.26%  blink::StyleInvalidator::invalidate /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.cpp
-      2871318565  2.47%  9.69% 2979878602  2.57%  blink::StyleInvalidator::RecursionCheckpoint::~RecursionCheckpoint /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.h (inline)
-      1914657964  1.65% 11.33% 2164475253  1.86%  WTF::StringImpl::lower /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/text/StringImpl.cpp
-      1841071698  1.59% 12.92% 13112332809 11.29%  blink::StyleInvalidator::RecursionData::matchesCurrentInvalidationSets /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.cpp (inline)
-      1825142681  1.57% 14.49% 1828134467  1.57%  blink::StyleInvalidator::RecursionCheckpoint::RecursionCheckpoint /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.h (inline)
-      1727655605  1.49% 15.98% 1925839708  1.66%  blink::Element::hasID /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/dom/Element.h (inline)
-      1548329435  1.33% 17.31% 14927333582 12.85%  blink::StyleInvalidator::checkInvalidationSetsAgainstElement /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.cpp (inline)
-      1429307046  1.23% 18.54% 1931177544  1.66%  WTF::HashTableConstIterator::skipEmptyBuckets /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/HashTable.h
-      1298665649  1.12% 19.66% 4872203383  4.20%  blink::SelectorChecker::matchSelector /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/SelectorChecker.cpp
-      1241347773  1.07% 20.73% 88048746121 75.81%  [anon]
diff --git a/user_activity_benchmarks/testdata/input/pprof_top/file3.pprof b/user_activity_benchmarks/testdata/input/pprof_top/file3.pprof
deleted file mode 100644
index 6cbf124..0000000
--- a/user_activity_benchmarks/testdata/input/pprof_top/file3.pprof
+++ /dev/null
@@ -1,21 +0,0 @@
-File: perf
-Build ID: 1000000000
-Type: instructions_event
-Showing nodes accounting for 53216795676, 53.50% of 99475025143 total
-Dropped 39931 nodes (cum <= 497375125)
-      flat  flat%   sum%        cum   cum%
-      6447071173  6.48%  6.48% 6461127774  6.50%  s_mpv_mul_add_vec64 mpi/mpi_amd64_gas.s
-      5026798026  5.05% 11.53% 5033673091  5.06%  SkMulDiv255Round /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkMath.h (inline)
-      3520577246  3.54% 15.07% 4431002672  4.45%  wk_png_write_find_filter /home/chrome-bot/chrome_root/src/third_party/libpng/pngwutil.c
-      2907776944  2.92% 18.00% 3738572984  3.76%  __memcpy_sse2_unaligned ../sysdeps/x86_64/multiarch/memcpy-sse2-unaligned.S
-      2632046464  2.65% 20.64% 2636062338  2.65%  longest_match /home/chrome-bot/chrome_root/src/third_party/zlib/deflate.c (inline)
-      1699966816  1.71% 22.35% 1699966816  1.71%  _mm_set_epi32 /usr/lib/gcc/x86_64-cros-linux-gnu/4.9.x/include/emmintrin.h (inline)
-      1669101893  1.68% 24.03% 1673814801  1.68%  s_mp_sqr_comba_16 /build/gnawty/tmp/portage/dev-libs/nss-3.23-r1/work/nss-3.23/nss-.amd64/lib/freebl/mpi/mp_comba.c
-      1634108599  1.64% 25.67% 4636591817  4.66%  convert32_row /home/chrome-bot/chrome_root/src/third_party/skia/src/core/SkConfig8888.cpp
-      1628614163  1.64% 27.31% 1633246854  1.64%  SkPackARGB32 /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h (inline)
-      1541044177  1.55% 28.86% 3001680713  3.02%  convert32 /home/chrome-bot/chrome_root/src/third_party/skia/src/core/SkConfig8888.cpp (inline)
-      1458290775  1.47% 30.32% 1459976296  1.47%  SkSwizzle_RB /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h (inline)
-      1455734663  1.46% 31.79% 1456692596  1.46%  MOZ_Z_adler32 /home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c
-      1272700545  1.28% 33.07% 1858067219  1.87%  sha_transform /mnt/host/source/src/third_party/kernel/v3.10/lib/sha1.c
-      1137455802  1.14% 34.21% 1150209506  1.16%  unpack_ubyte_b8g8r8a8_unorm /build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c (inline)
-      1036731662  1.04% 35.25% 32561535338 32.73%  [anon]
diff --git a/user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv b/user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv
deleted file mode 100644
index 67af724..0000000
--- a/user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv
+++ /dev/null
@@ -1,15 +0,0 @@
-function,file,flat,flat_p,sum_p,cum,cum_p
-v8::internal::Bitmap::MarkBitFromIndex,/home/chrome-bot/chrome_root/src/v8/src/heap/spaces.h,3139339048,0.0084,0.4135,3144663928,0.0085
-v8::base::NoBarrier_Load,/home/chrome-bot/chrome_root/src/v8/src/base/atomicops_internals_x86_gcc.h,2907238921,0.0078,0.4294,2930031660,0.0079
-v8::Object::GetAlignedPointerFromInternalField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,6054608957,0.0163,0.3538,8069380147,0.0217
-[anon],,115734836217,0.3111,0.3111,329503350629,0.8856
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,2725201614,0.0073,0.4517,3511333688,0.0094
-WTF::hashInt,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/HashFunctions.h,2786321388,0.0075,0.4444,2794002850,0.0075
-blink::ElementV8Internal::idAttributeGetterCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,4651723038,0.0125,0.3663,8205985387,0.0221
-v8::internal::Internals::ReadField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,3354819815,0.009,0.3876,3361796139,0.009
-blink::v8StringToWebCoreString,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp,9839378797,0.0264,0.3375,14384869492,0.0387
-blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp,4569044106,0.0123,0.3786,6408862507,0.0172
-blink::DocumentV8Internal::getElementByIdMethodCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Document.cpp,3277220829,0.0088,0.3964,14077115947,0.0378
-v8::internal::MarkCompactMarkingVisitor::VisitUnmarkedObjects,/home/chrome-bot/chrome_root/src/v8/src/heap/mark-compact.cc,2791274646,0.0075,0.4369,11058283504,0.0297
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,3007599556,0.0081,0.4216,13057167098,0.0351
-v8::internal::Internals::HasHeapObjectTag,/home/chrome-bot/chrome_root/src/v8/include/v8.h,3225711531,0.0087,0.4051,3228415743,0.0087
diff --git a/user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof b/user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof
deleted file mode 100644
index 69b5606..0000000
--- a/user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof
+++ /dev/null
@@ -1,29 +0,0 @@
-File: perf
-Build ID: 37750b32016528ac896fc238e0d00513e218fd9e
-Type: instructions_event
-Showing nodes accounting for 234768811461, 63.10% of 372058624378 total
-Dropped 33979 nodes (cum <= 1860293121)
-Showing top 80 nodes out of 271
-----------------------------------------------------------+-------------
-      flat  flat%   sum%        cum   cum%   calls calls% + context 	 	 
-----------------------------------------------------------+-------------
-                                       13412390629 93.24% |   blink::V8StringResource::toString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.h
-                                         437497332  3.04% |   [anon]
-                                         378465996  2.63% |   blink::V8StringResource::operator WTF::AtomicString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.h
-9839378797  2.64% 33.75% 14384869492  3.87%                | blink::v8StringToWebCoreString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp
-                                        3180428647 22.11% |   v8::String::GetExternalStringResourceBase /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-                                         514301458  3.58% |   WTF::RefPtr::RefPtr /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/RefPtr.h (inline)
-----------------------------------------------------------+-------------
-                                        8205985387   100% |   [anon]
-4651723038  1.25% 36.63% 8205985387  2.21%                | blink::ElementV8Internal::idAttributeGetterCallback /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp
-                                         717786059  8.75% |   v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-----------------------------------------------------------+-------------
-                                        6408862507   100% |   [anon]
-4569044106  1.23% 37.86% 6408862507  1.72%                | blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp
-                                         773479621 12.07% |   v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-                                         690710254 10.78% |   blink::v8SetReturnValueForMainWorld /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8Binding.h (inline)
-----------------------------------------------------------+-------------
-                                        2005371070 59.65% |   v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-                                         954968101 28.41% |   v8::String::GetExternalStringResourceBase /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-3354819815   0.9% 38.76% 3361796139   0.9%                | v8::internal::Internals::ReadField /home/chrome-bot/chrome_root/src/v8/include/v8.h
-----------------------------------------------------------+-------------
diff --git a/user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv b/user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv
deleted file mode 100644
index 9b15561..0000000
--- a/user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv
+++ /dev/null
@@ -1,6 +0,0 @@
-parent_function,parent_function_file,child_function,child_function_file,inclusive_count_fraction
-blink::ElementV8Internal::idAttributeGetterCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,v8::Object::GetAlignedPointerFromInternalField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,0.0875
-blink::v8StringToWebCoreString,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp,WTF::RefPtr::RefPtr,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/RefPtr.h,0.0358
-blink::v8StringToWebCoreString,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp,v8::String::GetExternalStringResourceBase,/home/chrome-bot/chrome_root/src/v8/include/v8.h,0.2211
-blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp,blink::v8SetReturnValueForMainWorld,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8Binding.h,0.10779999999999999
-blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp,v8::Object::GetAlignedPointerFromInternalField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,0.1207
diff --git a/user_activity_benchmarks/testdata/results/pprof_common/file1.pprof b/user_activity_benchmarks/testdata/results/pprof_common/file1.pprof
deleted file mode 100644
index 30d4c83..0000000
--- a/user_activity_benchmarks/testdata/results/pprof_common/file1.pprof
+++ /dev/null
@@ -1,3 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,debug/opt/google/chrome/chrome,30638548,3007599556,0.81%,42.16%,13057167098,3.51%
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,/opt/google/chrome/chrome,21484525,2725201614,0.73%,45.17%,3511333688,0.94%
\ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/results/pprof_common/file2.pprof b/user_activity_benchmarks/testdata/results/pprof_common/file2.pprof
deleted file mode 100644
index bef9266..0000000
--- a/user_activity_benchmarks/testdata/results/pprof_common/file2.pprof
+++ /dev/null
@@ -1,2 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::InvalidationSet::invalidatesElement,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/InvalidationSet.cpp,debug/opt/google/chrome/chrome,42293369,4585860529,3.95%,3.95%,13583834527,11.70%
\ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/results/pprof_common/file3.pprof b/user_activity_benchmarks/testdata/results/pprof_common/file3.pprof
deleted file mode 100644
index 7bac48e..0000000
--- a/user_activity_benchmarks/testdata/results/pprof_common/file3.pprof
+++ /dev/null
@@ -1,4 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-SkPackARGB32,/home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,/opt/google/chrome/chrome,15535764,1628614163,1.64%,27.31%,1633246854,1.64%
-MOZ_Z_adler32,/home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c,/opt/google/chrome/chrome,17825054,1455734663,1.46%,31.79%,1456692596,1.46%
-unpack_ubyte_b8g8r8a8_unorm,/build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,19183960,1137455802,1.14%,34.21%,1150209506,1.16%
\ No newline at end of file
diff --git a/user_activity_benchmarks/utils.py b/user_activity_benchmarks/utils.py
deleted file mode 100644
index 009b241..0000000
--- a/user_activity_benchmarks/utils.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Utility functions for parsing pprof, CWP data and Chrome OS groups files."""
-
-from collections import defaultdict
-
-import csv
-import os
-import re
-
-SEPARATOR_REGEX = re.compile(r'-+\+-+')
-FUNCTION_STATISTIC_REGEX = \
-    re.compile(r'(\S+)\s+(\S+)%\s+(\S+)%\s+(\S+)\s+(\S+)%')
-CHILD_FUNCTION_PERCENTAGE_REGEX = re.compile(r'([0-9.]+)%')
-FUNCTION_KEY_SEPARATOR_REGEX = re.compile(r'\|\s+')
-# Constants used to identify if a function is common in the pprof and CWP
-# files.
-COMMON_FUNCTION = 'common'
-EXTRA_FUNCTION = 'extra'
-PARENT_CHILD_FUNCTIONS_SEPARATOR = ';;'
-# List of string pairs used to make substitutions in file names so that the
-# CWP and pprof data are consistent.
-FILE_NAME_REPLACING_PAIR_STRINGS = [('gnawty', 'BOARD'),
-                                    ('amd64-generic', 'BOARD'),
-                                    (' ../sysdeps', ',sysdeps'),
-                                    (' ../nptl', ',nptl'),
-                                    ('  aes-x86_64.s', ',aes-x86_64.s'),
-                                    (' (inline)', ''),
-                                    (' (partial-inline)', ''),
-                                    (' ../', ','),
-                                    ('../', '')]
-# Separator used to delimit the function from the file name.
-FUNCTION_FILE_SEPARATOR = ' /'
-
-
-def MakeCWPAndPprofFileNamesConsistent(file_name):
-  """Makes the CWP and pprof file names consistent.
-
-  For the same function, some file paths may differ slightly in the CWP data
-  compared to the pprof output. For each pair in
-  FILE_NAME_REPLACING_PAIR_STRINGS, we substitute the first element with the
-  second one in the file name.
-
-  Args:
-    file_name: A string representing the name of the file.
-
-  Returns:
-    A string representing the modified name of the file.
-  """
-  file_name = file_name.replace(', ', '; ')
-  for replacing_pair_string in FILE_NAME_REPLACING_PAIR_STRINGS:
-    file_name = file_name.replace(replacing_pair_string[0],
-                                  replacing_pair_string[1])
-
-  return file_name
-
-def MakePprofFunctionKey(function_and_file_name):
-  """Creates the function key from the function and file name.
-
-  Parsing the pprof --top and --tree outputs is difficult because it is hard
-  to extract the function and file name (i.e. the function names can contain
-  a lot of unexpected characters such as spaces, operators etc).
-  For the moment, we use FUNCTION_FILE_SEPARATOR as the delimiter between the
-  function and the file name. However, there are some cases where the file
-  name does not start with / and we treat these cases separately
-  (i.e. ../sysdeps, ../nptl, aes-x86_64.s).
-
-  Args:
-    function_and_file_name: A string representing the function and the file name
-      as it appears in the pprof output.
-
-  Returns:
-    A string representing the function key, composed from the function and file
-    name, comma separated.
-  """
-  # TODO(evelinad): Use pprof --topproto instead of pprof --top to parse
-  # protobuffers instead of text output. Investigate if there is an equivalent
-  # for pprof --tree that gives protobuffer output.
-  #
-  # In the CWP output, we replace the , with ; as a workaround for parsing
-  # csv files. We do the same for the pprof output.
-  #
-  # TODO(evelinad): Use dremel --csv_dialect=excel-tab in the queries for
-  # replacing the , delimiter with tab.
-  function_and_file_name = function_and_file_name.replace(', ', '; ')
-  # If the function and file name sequence contains the FUNCTION_FILE_SEPARATOR,
-  # we normalize the path name of the file and make the string substitutions
-  # to make the CWP and pprof data consistent. The returned key is composed
-  # of the function name and the normalized file path name, separated by a
-  # comma. If the function and file name sequence does not contain the
-  # FUNCTION_FILE_SEPARATOR, we just do the string substitutions.
-  if FUNCTION_FILE_SEPARATOR in function_and_file_name:
-    function_name, file_name = \
-        function_and_file_name.split(FUNCTION_FILE_SEPARATOR)
-    file_name = \
-        MakeCWPAndPprofFileNamesConsistent(os.path.normpath("/" + file_name))
-    return ','.join([function_name, file_name])
-
-  return MakeCWPAndPprofFileNamesConsistent(function_and_file_name)
-
-
-def ComputeCWPCummulativeInclusiveStatistics(cwp_inclusive_count_statistics):
-  """Computes the cumulative inclusive count value of a function.
-
-  A function might appear declared in multiple files or objects. When
-  computing the fraction of the inclusive count value from a child function to
-  the parent function, we take into consideration the sum of the inclusive
-  count values from all the occurrences of that function.
-
-  Args:
-    cwp_inclusive_count_statistics: A dict containing the inclusive count
-    statistics extracted by the ParseCWPInclusiveCountFile method.
-
-  Returns:
-    A dict having as a key the name of the function and as a value the sum of
-    the inclusive count values of the occurrences of the function from all
-    the files and objects.
-  """
-  cwp_inclusive_count_statistics_cumulative = defaultdict(int)
-
-  for function_key, function_statistics \
-      in cwp_inclusive_count_statistics.iteritems():
-    function_name, _ = function_key.split(',')
-    cwp_inclusive_count_statistics_cumulative[function_name] += \
-        function_statistics[1]
-
-  return cwp_inclusive_count_statistics_cumulative
-
-def ComputeCWPChildFunctionsFractions(cwp_inclusive_count_statistics_cumulative,
-                                      cwp_pairwise_inclusive_count_statistics):
-  """Computes the fractions of the inclusive count values for child functions.
-
-  The fraction represents the inclusive count value of a child function over
-  the one of the parent function.
-
-  Args:
-    cwp_inclusive_count_statistics_cumulative: A dict containing the
-      cumulative inclusive count values of the CWP functions.
-    cwp_pairwise_inclusive_count_statistics: A dict containing the inclusive
-      count statistics for pairs of parent and child functions. The key is the
-      parent function. The value is a dict with the key the name of the child
-      function and the file name, comma separated, and the value is the
-      inclusive count value of the pair of parent and child functions.
-
-  Returns:
-      A dict containing the inclusive count statistics for pairs of parent
-      and child functions. The key is the parent function. The value is a
-      dict with the key the name of the child function and the file name,
-      comma separated, and the value is the inclusive count fraction of the
-      child function out of the parent function.
-  """
-
-  pairwise_inclusive_count_fractions = {}
-
-  for parent_function_key, child_functions_metrics in \
-      cwp_pairwise_inclusive_count_statistics.iteritems():
-    child_functions_fractions = {}
-    parent_function_inclusive_count = \
-    cwp_inclusive_count_statistics_cumulative.get(parent_function_key, 0.0)
-
-    if parent_function_key in cwp_inclusive_count_statistics_cumulative:
-      for child_function_key, child_function_inclusive_count \
-          in child_functions_metrics.iteritems():
-        child_functions_fractions[child_function_key] = \
-           child_function_inclusive_count / parent_function_inclusive_count
-    else:
-      for child_function_key, child_function_inclusive_count \
-          in child_functions_metrics.iteritems():
-        child_functions_fractions[child_function_key] = 0.0
-    pairwise_inclusive_count_fractions[parent_function_key] = \
-        child_functions_fractions
-
-  return pairwise_inclusive_count_fractions
-
-def ParseFunctionGroups(cwp_function_groups_lines):
-  """Parses the contents of the function groups file.
-
-  Args:
-    cwp_function_groups_lines: A list of the lines contained in the CWP
-      function groups file. A line contains the group name and the file path
-      that describes the group, separated by a space.
-
-  Returns:
-    A list of tuples containing the group name and the file path.
-  """
-  # The order of the groups mentioned in the cwp_function_groups file
-  # matters. A function declared in a file will belong to the first
-  # mentioned group that matches its path to the one of the file.
-  # It is possible to have multiple paths that belong to the same group.
-  return [tuple(line.split()) for line in cwp_function_groups_lines]
-
-
-def ParsePprofTopOutput(file_name):
-  """Parses a file that contains the output of the pprof --top command.
-
-  Args:
-    file_name: The name of the file containing the pprof --top output.
-
-  Returns:
-    A dict having as a key the name of the function and the file containing
-    the declaration of the function, separated by a comma, and as a value
-    a tuple containing the flat, flat percentage, sum percentage, cumulative
-    and cumulative percentage values.
-  """
-
-  pprof_top_statistics = {}
-
-  # In the pprof top output, the statistics of the functions start from the
-  # 6th line.
-  with open(file_name) as input_file:
-    pprof_top_content = input_file.readlines()[6:]
-
-  for line in pprof_top_content:
-    function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)
-    flat, flat_p, sum_p, cum, cum_p = function_statistic_match.groups()
-    flat_p = str(float(flat_p) / 100.0)
-    sum_p = str(float(sum_p) / 100.0)
-    cum_p = str(float(cum_p) / 100.0)
-    lookup_index = function_statistic_match.end()
-    function_and_file_name = line[lookup_index + 2 : -1]
-    key = MakePprofFunctionKey(function_and_file_name)
-    pprof_top_statistics[key] = (flat, flat_p, sum_p, cum, cum_p)
-  return pprof_top_statistics
-
-
-def ParsePprofTreeOutput(file_name):
-  """Parses a file that contains the output of the pprof --tree command.
-
-  Args:
-    file_name: The name of the file containing the pprof --tree output.
-
-  Returns:
-    A dict including the statistics for pairs of parent and child functions.
-    The key is the name of the parent function and the file where the
-    function is declared, separated by a comma. The value is a dict having as
-    a key the name of the child function and the file where the function is
-    declared, comma separated, and as a value the percentage of time the
-    parent function spends in the child function.
-  """
-
-  # In the pprof output, the statistics of the functions start from the 9th
-  # line.
-  with open(file_name) as input_file:
-    pprof_tree_content = input_file.readlines()[9:]
-
-  pprof_tree_statistics = defaultdict(lambda: defaultdict(float))
-  track_child_functions = False
-
-  # The statistics of a given function, its parent and child functions are
-  # included between two separator marks.
-  # All the parent function statistics are above the line containing the
-  # statistics of the given function.
-  # All the statistics of a child function are below the statistics of the
-  # given function.
-  # The statistics of a parent or a child function contain the calls, calls
-  # percentage, the function name and the file where the function is declared.
-  # The statistics of the given function contain the flat, flat percentage,
-  # sum percentage, cumulative, cumulative percentage, function name and the
-  # name of the file containing the declaration of the function.
-  for line in pprof_tree_content:
-    separator_match = SEPARATOR_REGEX.search(line)
-
-    if separator_match:
-      track_child_functions = False
-      continue
-
-    parent_function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)
-
-    if parent_function_statistic_match:
-      track_child_functions = True
-      lookup_index = parent_function_statistic_match.end()
-      parent_function_key_match = \
-          FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)
-      lookup_index = parent_function_key_match.end()
-      parent_function_key = MakePprofFunctionKey(line[lookup_index:-1])
-      continue
-
-    if not track_child_functions:
-      continue
-
-    child_function_statistic_match = \
-        CHILD_FUNCTION_PERCENTAGE_REGEX.search(line)
-    child_function_percentage = \
-        float(child_function_statistic_match.group(1))
-    lookup_index = child_function_statistic_match.end()
-    child_function_key_match = \
-        FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)
-    lookup_index = child_function_key_match.end()
-    child_function_key = MakePprofFunctionKey(line[lookup_index:-1])
-
-    pprof_tree_statistics[parent_function_key][child_function_key] += \
-        child_function_percentage / 100.0
-
-  return pprof_tree_statistics
-
-
-def ParseCWPInclusiveCountFile(file_name):
-  """Parses the CWP inclusive count files.
-
-  A line should contain the name of the function, the file name with the
-  declaration, the inclusive count and inclusive count fraction out of the
-  total extracted inclusive count values.
-
-  Args:
-    file_name: The file containing the inclusive count values of the CWP
-    functions.
-
-  Returns:
-    A dict containing the inclusive count statistics. The key is the name of
-    the function and the file name, comma separated. The value represents a
-    tuple with the object name containing the function declaration, the
-    inclusive count and inclusive count fraction values, and a marker to
-    identify if the function is present in one of the benchmark profiles.
-  """
-  cwp_inclusive_count_statistics = defaultdict(lambda: ('', 0, 0.0, 0))
-
-  with open(file_name) as input_file:
-    statistics_reader = csv.DictReader(input_file, delimiter=',')
-    for statistic in statistics_reader:
-      function_name = statistic['function']
-      file_name = MakeCWPAndPprofFileNamesConsistent(
-          os.path.normpath(statistic['file']))
-      dso_name = statistic['dso']
-      inclusive_count = statistic['inclusive_count']
-      inclusive_count_fraction = statistic['inclusive_count_fraction']
-
-      # We ignore the lines that have empty fields (i.e. they specify only the
-      # addresses of the functions and the inclusive count values).
-      if all([
-          function_name, file_name, dso_name, inclusive_count,
-          inclusive_count_fraction
-      ]):
-        key = '%s,%s' % (function_name, file_name)
-
-        # There might be situations where a function appears in multiple files
-        # or objects. Such situations can occur when the Dremel queries do not
-        # specify the Chrome OS version and the name of the board (i.e. the
-        # files can belong to different kernel or library versions).
-        inclusive_count_sum = \
-            cwp_inclusive_count_statistics[key][1] + int(inclusive_count)
-        inclusive_count_fraction_sum = \
-            cwp_inclusive_count_statistics[key][2] + \
-            float(inclusive_count_fraction)
-
-        # All the functions are initially marked as EXTRA_FUNCTION.
-        value = \
-            (dso_name, inclusive_count_sum, inclusive_count_fraction_sum,
-             EXTRA_FUNCTION)
-        cwp_inclusive_count_statistics[key] = value
-
-  return cwp_inclusive_count_statistics
-
-
-def ParseCWPPairwiseInclusiveCountFile(file_name):
-  """Parses the CWP pairwise inclusive count files.
-
-  A line of the file should contain a pair of a parent and a child function,
-  concatenated by the PARENT_CHILD_FUNCTIONS_SEPARATOR, the name of the file
-  where the child function is declared and the inclusive count fractions of
-  the pair of functions out of the total amount of inclusive count values.
-
-  Args:
-    file_name: The file containing the pairwise inclusive_count statistics of
-      the CWP functions.
-
-  Returns:
-    A dict containing the statistics of the parent functions and each of
-    their child functions. The key of the dict is the name of the parent
-    function. The value is a dict having as a key the name of the child
-    function with its file name separated by a ',' and as a value the
-    inclusive count value of the parent-child function pair.
-  """
-  pairwise_inclusive_count_statistics = defaultdict(lambda: defaultdict(float))
-
-  with open(file_name) as input_file:
-    statistics_reader = csv.DictReader(input_file, delimiter=',')
-
-    for statistic in statistics_reader:
-      parent_function_name, child_function_name = \
-          statistic['parent_child_functions'].split(
-              PARENT_CHILD_FUNCTIONS_SEPARATOR)
-      child_function_file_name = MakeCWPAndPprofFileNamesConsistent(
-          os.path.normpath(statistic['child_function_file']))
-      inclusive_count = statistic['inclusive_count']
-
-      # There might be situations where a child function appears in
-      # multiple files or objects. Such situations can occur when the
-      # Dremel queries do not specify the Chrome OS version and the
-      # name of the board (i.e. the files can belong to different kernel or
-      # library versions), when the child function is a template function
-      # that is declared in a header file, or when there are name collisions
-      # between multiple executable objects.
-      # If a pair of child and parent functions appears multiple times, we
-      # add their inclusive count values.
-      child_function_key = ','.join(
-          [child_function_name, child_function_file_name])
-      pairwise_inclusive_count_statistics[parent_function_name] \
-          [child_function_key] += float(inclusive_count)
-
-  return pairwise_inclusive_count_statistics
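For readers scanning this removal, the key-building step performed by the deleted MakePprofFunctionKey helper can be summarized by the standalone sketch below; the function name and file path are hypothetical, and the sketch is only a simplified approximation of the removed logic, not part of the patch itself.

# Illustrative sketch only (hypothetical input; simplified from the removed
# MakePprofFunctionKey / MakeCWPAndPprofFileNamesConsistent helpers).
import os

FUNCTION_FILE_SEPARATOR = ' /'

def make_key(function_and_file_name):
  # Split the "function  /path/to/file" pair on the separator, normalize the
  # path, and join the two parts with a comma to form the lookup key.
  function_and_file_name = function_and_file_name.replace(', ', '; ')
  if FUNCTION_FILE_SEPARATOR in function_and_file_name:
    function_name, file_name = function_and_file_name.split(
        FUNCTION_FILE_SEPARATOR)
    return ','.join([function_name, os.path.normpath('/' + file_name)])
  return function_and_file_name

print(make_key('base::RunLoop::Run /a/b/../b/run_loop.cc'))
# -> base::RunLoop::Run,/a/b/run_loop.cc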
diff --git a/user_activity_benchmarks/utils_unittest.py b/user_activity_benchmarks/utils_unittest.py
deleted file mode 100755
index 31bf83d..0000000
--- a/user_activity_benchmarks/utils_unittest.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the utility module."""
-
-import collections
-import csv
-import unittest
-
-import utils
-
-
-class UtilsTest(unittest.TestCase):
-  """Test class for utility module."""
-
-  def __init__(self, *args, **kwargs):
-    super(UtilsTest, self).__init__(*args, **kwargs)
-    self._pprof_top_csv_file = 'testdata/input/pprof_top_csv/file1.csv'
-    self._pprof_top_file = 'testdata/input/pprof_top/file1.pprof'
-    self._pprof_tree_csv_file = 'testdata/input/pprof_tree_csv/file1.csv'
-    self._pprof_tree_file = 'testdata/input/pprof_tree/file1.pprof'
-    self._pairwise_inclusive_count_test_file = \
-        'testdata/input/pairwise_inclusive_count_test.csv'
-    self._pairwise_inclusive_count_reference_file = \
-        'testdata/input/pairwise_inclusive_count_reference.csv'
-    self._inclusive_count_test_file = \
-       'testdata/input/inclusive_count_test.csv'
-    self._inclusive_count_reference_file = \
-        'testdata/input/inclusive_count_reference.csv'
-
-  def testParseFunctionGroups(self):
-    cwp_function_groups_lines = \
-        ['group1 /a\n', 'group2 /b\n', 'group3 /c\n', 'group4 /d\n']
-    expected_output = [('group1', '/a'), ('group2', '/b'), ('group3', '/c'),
-                       ('group4', '/d')]
-    result = utils.ParseFunctionGroups(cwp_function_groups_lines)
-
-    self.assertListEqual(expected_output, result)
-
-  def testParsePProfTopOutput(self):
-    result_pprof_top_output = utils.ParsePprofTopOutput(self._pprof_top_file)
-    expected_pprof_top_output = {}
-
-    with open(self._pprof_top_csv_file) as input_file:
-      statistics_reader = csv.DictReader(input_file, delimiter=',')
-
-      for statistic in statistics_reader:
-        if statistic['file']:
-          function_key = ','.join([statistic['function'], statistic['file']])
-        else:
-          function_key = statistic['function']
-        expected_pprof_top_output[function_key] = \
-            (statistic['flat'], statistic['flat_p'], statistic['sum_p'],
-             statistic['cum'], statistic['cum_p'])
-
-    self.assertDictEqual(result_pprof_top_output, expected_pprof_top_output)
-
-  def testParsePProfTreeOutput(self):
-    result_pprof_tree_output = utils.ParsePprofTreeOutput(self._pprof_tree_file)
-    expected_pprof_tree_output = collections.defaultdict(dict)
-
-    with open(self._pprof_tree_csv_file) as input_file:
-      statistics_reader = csv.DictReader(input_file, delimiter=',')
-
-      for statistic in statistics_reader:
-        parent_function_key = \
-            ','.join([statistic['parent_function'],
-                      statistic['parent_function_file']])
-        child_function_key = \
-            ','.join([statistic['child_function'],
-                      statistic['child_function_file']])
-
-        expected_pprof_tree_output[parent_function_key][child_function_key] = \
-            float(statistic['inclusive_count_fraction'])
-
-    self.assertDictEqual(result_pprof_tree_output, expected_pprof_tree_output)
-
-  def testParseCWPInclusiveCountFile(self):
-    expected_inclusive_statistics_test = \
-        {'func_i,/c/d/file_i': ('i', 5, 4.4, utils.EXTRA_FUNCTION),
-         'func_j,/e/file_j': ('j', 6, 5.5, utils.EXTRA_FUNCTION),
-         'func_f,/a/b/file_f': ('f', 4, 2.3, utils.EXTRA_FUNCTION),
-         'func_h,/c/d/file_h': ('h', 1, 3.3, utils.EXTRA_FUNCTION),
-         'func_k,/e/file_k': ('k', 7, 6.6, utils.EXTRA_FUNCTION),
-         'func_g,/a/b/file_g': ('g', 2, 2.2, utils.EXTRA_FUNCTION)}
-    expected_inclusive_statistics_reference = \
-        {'func_i,/c/d/file_i': ('i', 5, 4.0, utils.EXTRA_FUNCTION),
-         'func_j,/e/file_j': ('j', 6, 5.0, utils.EXTRA_FUNCTION),
-         'func_f,/a/b/file_f': ('f', 1, 1.0, utils.EXTRA_FUNCTION),
-         'func_l,/e/file_l': ('l', 7, 6.0, utils.EXTRA_FUNCTION),
-         'func_h,/c/d/file_h': ('h', 4, 3.0, utils.EXTRA_FUNCTION),
-         'func_g,/a/b/file_g': ('g', 5, 4.4, utils.EXTRA_FUNCTION)}
-    result_inclusive_statistics_test = \
-        utils.ParseCWPInclusiveCountFile(self._inclusive_count_test_file)
-    result_inclusive_statistics_reference = \
-        utils.ParseCWPInclusiveCountFile(self._inclusive_count_reference_file)
-
-    self.assertDictEqual(result_inclusive_statistics_test,
-                         expected_inclusive_statistics_test)
-    self.assertDictEqual(result_inclusive_statistics_reference,
-                         expected_inclusive_statistics_reference)
-
-  def testParseCWPPairwiseInclusiveCountFile(self):
-    expected_pairwise_inclusive_statistics_test = {
-        'func_f': {'func_g,/a/b/file_g2': 0.01,
-                   'func_h,/c/d/file_h': 0.02,
-                   'func_i,/c/d/file_i': 0.03},
-        'func_g': {'func_j,/e/file_j': 0.4,
-                   'func_m,/e/file_m': 0.6}
-    }
-    expected_pairwise_inclusive_statistics_reference = {
-        'func_f': {'func_g,/a/b/file_g': 0.1,
-                   'func_h,/c/d/file_h': 0.2,
-                   'func_i,/c/d/file_i': 0.3},
-        'func_g': {'func_j,/e/file_j': 0.4}
-    }
-    result_pairwise_inclusive_statistics_test = \
-        utils.ParseCWPPairwiseInclusiveCountFile(
-            self._pairwise_inclusive_count_test_file)
-    result_pairwise_inclusive_statistics_reference = \
-        utils.ParseCWPPairwiseInclusiveCountFile(
-            self._pairwise_inclusive_count_reference_file)
-
-    self.assertDictEqual(result_pairwise_inclusive_statistics_test,
-                         expected_pairwise_inclusive_statistics_test)
-    self.assertDictEqual(result_pairwise_inclusive_statistics_reference,
-                         expected_pairwise_inclusive_statistics_reference)
-
-
-if __name__ == '__main__':
-  unittest.main()
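As a minimal illustration of what the removed testParseFunctionGroups case exercised (the group names and path prefixes below are made up), the parser reduces to splitting each line into a (group, path) pair:

# Hypothetical group-file lines; each line is "<group name> <path prefix>".
cwp_function_groups_lines = ['ash /chromeos/ash\n', 'kernel /usr/src\n']
# The removed ParseFunctionGroups helper is effectively a one-line split.
print([tuple(line.split()) for line in cwp_function_groups_lines])
# -> [('ash', '/chromeos/ash'), ('kernel', '/usr/src')]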
diff --git a/verify_compiler.py b/verify_compiler.py
index 9eafbb8..b70c125 100755
--- a/verify_compiler.py
+++ b/verify_compiler.py
@@ -34,13 +34,13 @@
   return retval
 
 
-def FindAllFiles(root_dir, cmd_executer):
+def FindAllFiles(root_dir):
   """Create a list of all the *.debug and *.dwp files to be checked."""
 
   file_list = []
   tmp_list = [
       os.path.join(dirpath, f)
-      for dirpath, dirnames, files in os.walk(root_dir)
+      for dirpath, _, files in os.walk(root_dir)
       for f in fnmatch.filter(files, '*.debug')
   ]
   for f in tmp_list:
@@ -48,7 +48,7 @@
       file_list.append(f)
   tmp_list = [
       os.path.join(dirpath, f)
-      for dirpath, dirnames, files in os.walk(root_dir)
+      for dirpath, _, files in os.walk(root_dir)
       for f in fnmatch.filter(files, '*.dwp')
   ]
   file_list += tmp_list
@@ -99,8 +99,8 @@
   status = CreateTmpDwarfFile(filename, dwarf_file, cmd_executer)
 
   if status != 0:
-    print('Unable to create dwarf file for %s (status: %d).' %
-          (filename, status))
+    print('Unable to create dwarf file for %s (status: %d).' % (filename,
+                                                                status))
     return status
 
   comp_str = COMPILER_STRINGS[compiler]
@@ -121,8 +121,8 @@
         if 'DW_AT_name' in line:
           words = line.split(':')
           bad_file = words[-1]
-          print('FAIL:  %s was not compiled with %s.' %
-                (bad_file.rstrip(), compiler))
+          print('FAIL:  %s was not compiled with %s.' % (bad_file.rstrip(),
+                                                         compiler))
           looking_for_name = False
         elif 'DW_TAG_' in line:
           looking_for_name = False
@@ -189,7 +189,7 @@
   if filename:
     file_list.append(filename)
   else:
-    file_list = FindAllFiles(root_dir, cmd_executer)
+    file_list = FindAllFiles(root_dir)
 
   bad_files = []
   unknown_files = []
@@ -222,9 +222,8 @@
       for f in bad_files:
         print(f)
       if len(unknown_files) > 0:
-        print(
-            '\n\nUnable to verify the following files (no debug info in them):\n'
-        )
+        print('\n\nUnable to verify the following files (no debug info in '
+              'them):\n')
         for f in unknown_files:
           print(f)
     return 1
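The hunks above drop the unused cmd_executer parameter and the unused dirnames loop variable from FindAllFiles; a self-contained sketch of the resulting pattern (the root path below is hypothetical) looks like this:

import fnmatch
import os

def find_files(root_dir, pattern):
  # Walk root_dir, ignoring the directory-name slot ('_'), and collect every
  # file matching pattern, as the simplified FindAllFiles now does.
  return [
      os.path.join(dirpath, f)
      for dirpath, _, files in os.walk(root_dir)
      for f in fnmatch.filter(files, pattern)
  ]

debug_files = find_files('/tmp/debug_root', '*.debug')  # hypothetical root
dwp_files = find_files('/tmp/debug_root', '*.dwp')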
diff --git a/weekly_report.py b/weekly_report.py
index e74c623..01db867 100755
--- a/weekly_report.py
+++ b/weekly_report.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
 #
 # Copyright Google Inc. 2014
 """Module to generate the 7-day crosperf reports."""
@@ -55,8 +55,8 @@
   chromeos_root: %s
   chromeos_image: %s
 }
-""" % (test_name, chromeos_root, os.path.join(test_path,
-                                              'chromiumos_test_image.bin'))
+""" % (test_name, chromeos_root,
+       os.path.join(test_path, 'chromiumos_test_image.bin'))
       f.write(test_image)
 
   return filename
@@ -109,8 +109,8 @@
   chromeos_root: %s
   chromeos_image: %s
 }
-""" % (test_name, chromeos_root, os.path.join(test_path,
-                                              'chromiumos_test_image.bin'))
+""" % (test_name, chromeos_root,
+       os.path.join(test_path, 'chromiumos_test_image.bin'))
       f.write(test_image)
 
   return filename
@@ -121,13 +121,14 @@
   parser = argparse.ArgumentParser()
   parser.add_argument('-b', '--board', dest='board', help='Target board.')
   parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
-  parser.add_argument('-v',
-                      '--vanilla_only',
-                      dest='vanilla_only',
-                      action='store_true',
-                      default=False,
-                      help='Generate a report comparing only the vanilla '
-                      'images.')
+  parser.add_argument(
+      '-v',
+      '--vanilla_only',
+      dest='vanilla_only',
+      action='store_true',
+      default=False,
+      help='Generate a report comparing only the vanilla '
+      'images.')
 
   options = parser.parse_args(argv[1:])
 
@@ -200,8 +201,8 @@
   timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                          '%Y-%m-%d_%H:%M:%S')
   results_dir = os.path.join(
-      os.path.expanduser('~/nightly_test_reports'), '%s.%s' % (
-          timestamp, options.board), 'weekly_tests')
+      os.path.expanduser('~/nightly_test_reports'),
+      '%s.%s' % (timestamp, options.board), 'weekly_tests')
 
   for day in WEEKDAYS:
     startdir = os.path.join(constants.CROSTC_WORKSPACE, day)
@@ -232,8 +233,8 @@
 
   # Run Crosperf on the file to generate the weekly report.
   cmd = ('%s/toolchain-utils/crosperf/crosperf '
-         '%s --no_email=True --results_dir=%s' %
-         (constants.CROSTC_WORKSPACE, filename, results_dir))
+         '%s --no_email=True --results_dir=%s' % (constants.CROSTC_WORKSPACE,
+                                                  filename, results_dir))
   retv = cmd_executer.RunCommand(cmd)
   if retv == 0:
     # Send the email, if the crosperf command worked.