Merge remote-tracking branch 'aosp/mirror-chromium-master' into update_utils
am: 310ded5b2f

Change-Id: I7654dd1514482bfc3df1bcb4b9c617e00dfb8190
diff --git a/COMMIT-QUEUE.ini b/COMMIT-QUEUE.ini
index b718f38..0afba42 100644
--- a/COMMIT-QUEUE.ini
+++ b/COMMIT-QUEUE.ini
@@ -7,6 +7,8 @@
 
 [GENERAL]
 
-# This repository isn't used by the CQ at all, so we can submit in the
-# pre-cq.
-submit-in-pre-cq: yes
+# Stages to ignore in the commit queue. If these steps break, your CL will be
+# submitted anyway. Use with caution.
+
+# Files in toolchain-utils repo do not impact any HW/VM tests.
+ignored-stages: HWTest VMTest
diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py
index 1d9853a..4f91b04 100755
--- a/auto_delete_nightly_test_data.py
+++ b/auto_delete_nightly_test_data.py
@@ -127,33 +127,9 @@
   rv2 = 0
   ce = command_executer.GetCommandExecuter()
   minutes = 1440 * days_to_preserve
-  # Clean image tar files, which were last accessed 1 hour ago and clean image
-  # bin files that were last accessed more than specified time.
-  cmd = ('find {0}/*{1} -type f '
-         r'\( -name "chromiumos_test_image.tar"    -amin +60 -o '
-         r'   -name "chromiumos_test_image.tar.xz" -amin +60 -o '
-         r'   -name "chromiumos_test_image.bin"    -amin +{2} \) '
-         r'-exec bash -c "echo rm -f {{}}" \; '
-         r'-exec bash -c "rm -f {{}}" \;').format(chroot_tmp, subdir_suffix,
-                                                  minutes)
-
-  if dry_run:
-    print('Going to execute:\n%s' % cmd)
-  else:
-    rv2 = ce.RunCommand(cmd, print_to_console=False)
-    if rv2 == 0:
-      print('Successfully cleaned chromeos images from '
-            '"{0}/*{1}".'.format(chroot_tmp, subdir_suffix))
-    else:
-      print('Some chromeos images were not removed from '
-            '"{0}/*{1}".'.format(chroot_tmp, subdir_suffix))
-
-  rv += rv2
-
-  # Clean autotest files that were last accessed more than specified time.
+  # Clean files that were last accessed more than the specified time.
   rv2 = 0
-  cmd = (r'find {0}/*{1} -maxdepth 2 -type d '
-         r'\( -name "autotest_files" \) '
+  cmd = (r'find {0}/*{1}/* -maxdepth 1 -type d '
          r'-amin +{2} '
          r'-exec bash -c "echo rm -fr {{}}" \; '
          r'-exec bash -c "rm -fr {{}}" \;').format(chroot_tmp, subdir_suffix,
@@ -179,6 +155,9 @@
                                      'chroot', 'tmp')
   # Clean files in tmp directory
   rv = CleanChromeOsTmpFiles(chromeos_chroot_tmp, days_to_preserve, dry_run)
+  # Clean image files in *-tryjob directories
+  rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-tryjob',
+                                days_to_preserve, dry_run)
   # Clean image files in *-release directories
   rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-release',
                                 days_to_preserve, dry_run)
diff --git a/binary_search_tool/README.bisect b/binary_search_tool/README.bisect
index e6185e8..49e0c08 100644
--- a/binary_search_tool/README.bisect
+++ b/binary_search_tool/README.bisect
@@ -1,10 +1,11 @@
 
-bisect.py is a wrapper around the general purpose binary_search_state.py. It
-provides a user friendly interface for bisecting various compilation errors.
-The 2 currently provided methods of bisecting are ChromeOS package and object
-bisection. Each method defines a default set of options to pass to
-binary_search_state.py and allow the user to override these defaults (see
-the "Overriding" section).
+bisect.py is a wrapper around the general purpose
+binary_search_state.py. It provides a user friendly interface for
+bisecting various compilation errors.  The 2 currently provided
+methods of bisecting are ChromeOS package and object bisection. Each
+method defines a default set of options to pass to
+binary_search_state.py and allow the user to override these defaults
+(see the "Overriding" section).
 
 ** NOTE **
 All commands, examples, scripts, etc. are to be run from your chroot unless
@@ -31,8 +32,9 @@
       /build/${board}.work - A full copy of /build/${board}.bad
 
   b) Cleanup:
-    bisect.py does most cleanup for you, the only thing required by the user is
-    to cleanup all built images and the three build trees made in /build/
+    bisect.py does most cleanup for you, the only
+    thing required by the user is to cleanup all built images and the
+    three build trees made in /build/
 
   c) Default Arguments:
     --get_initial_items='cros_pkg/get_initial_items.sh'
@@ -187,16 +189,18 @@
         --test_script=sysroot_wrapper/boot_test.sh
 
 Resuming:
-  bisect.py and binary_search_state.py offer the ability to resume a bisection
-  in case it was interrupted by a SIGINT, power failure, etc. Every time the
-  tool completes a bisection iteration its state is saved to disk (usually to
-  the file "./bisect.py.state"). If passed the --resume option, the tool
+  bisect.py and binary_search_state.py offer the
+  ability to resume a bisection in case it was interrupted by a
+  SIGINT, power failure, etc. Every time the tool completes a
+  bisection iteration its state is saved to disk (usually to the file
+  "./bisect_driver.py.state"). If passed the --resume option, the tool
   it will automatically detect the state file and resume from the last
   completed iteration.
 
 Overriding:
-  You can run ./bisect.py --help or ./binary_search_state.py --help for a full
-  list of arguments that can be overriden. Here are a couple of examples:
+  You can run ./bisect.py --help or ./binary_search_state.py
+  --help for a full list of arguments that can be overridden. Here are
+  a couple of examples:
 
   Example 1 (do boot test instead of interactive test):
   ./bisect.py package daisy 172.17.211.182 --test_script=cros_pkg/boot_test.sh
diff --git a/binary_search_tool/android/cleanup.sh b/binary_search_tool/android/cleanup.sh
index c89c337..759b3ed 100755
--- a/binary_search_tool/android/cleanup.sh
+++ b/binary_search_tool/android/cleanup.sh
@@ -9,3 +9,7 @@
 #
 
 rm android/common.sh
+# Remove build command script if pass_bisect enabled
+rm -f android/cmd_script.sh
+# Remove tmp IR file used for ir_diff in pass bisection
+rm -f /tmp/bisection_bad_item.o
diff --git a/binary_search_tool/android/generate_cmd.sh b/binary_search_tool/android/generate_cmd.sh
new file mode 100755
index 0000000..78a39b1
--- /dev/null
+++ b/binary_search_tool/android/generate_cmd.sh
@@ -0,0 +1,60 @@
+#!/bin/bash -eu
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script extracts command line options to build bad item.
+# The generated script will be used by pass level bisection.
+#
+
+source android/common.sh
+
+abs_path=$1
+
+# The item will be `-o relative-path-to-object `, which will be used
+# for seeking command in populate log.
+# The `-o` at the beginning and the trailing space are necessary, so that
+# we get the build command for exactly this object file.
+# Example: prebuilt/../clang++ -O3 -MF obj1.o.d -o obj.o obj.cpp
+# We should count this command as one to build obj.o, not obj1.o.d.
+real_path=$(realpath --relative-to="${BISECT_WORK_BUILD}" "${abs_path}")
+item="-o $real_path "
+
+populate_log=${BISECT_BAD_BUILD}/_POPULATE_LOG
+
+output='#!/bin/bash -u\n'
+output+='source android/common.sh\n'
+
+result=$(egrep -m 1 -- "${item}" ${populate_log})
+
+# Re-generate bad item to tmp directory location
+tmp_ir='/tmp/bisection_bad_item.o'
+result=$(sed "s|$item|-o $tmp_ir |g" <<< ${result})
+
+# Remove `:` after cd command
+result=$(sed 's|cd:|cd|g' <<< ${result})
+
+# Add environment variable which helps pass level bisection
+result=$(sed 's| -o | $LIMIT_FLAGS -o |g' <<< ${result})
+
+output+=${result}
+
+# Symbolic link generated bad item to original object
+output+="\nln -f $tmp_ir $abs_path"
+output+="\ntouch $abs_path"
+
+echo -e "${output}" > android/cmd_script.sh
+
+chmod u+x android/cmd_script.sh
+
+echo 'Script created as android/cmd_script.sh'
+
+# Check if compiler is LLVM.
+if grep -q "clang" android/cmd_script.sh
+then
+    exit 0
+else
+    echo 'Pass/transformation level bisection only works for LLVM compiler.'
+    exit 1
+fi
\ No newline at end of file
diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py
index aaa09ee..a4f8c1c 100755
--- a/binary_search_tool/binary_search_perforce.py
+++ b/binary_search_tool/binary_search_perforce.py
@@ -1,4 +1,8 @@
 #!/usr/bin/env python2
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 """Module of binary serch for perforce."""
 from __future__ import print_function
 
@@ -63,6 +67,66 @@
     self.tag = tag
 
 
+class BinarySearcherForPass(object):
+  """Class of pass level binary searcher."""
+
+  def __init__(self, logger_to_set=None):
+    self.current = 0
+    self.lo = 0
+    self.hi = 0
+    self.total = 0
+    if logger_to_set is not None:
+      self.logger = logger_to_set
+    else:
+      self.logger = logger.GetLogger()
+
+  def GetNext(self):
+    # For the first run, update self.hi with total pass/transformation count
+    if self.hi == 0:
+      self.hi = self.total
+    self.current = (self.hi + self.lo) / 2
+    message = ('Bisecting between: (%d, %d)' % (self.lo, self.hi))
+    self.logger.LogOutput(message, print_to_console=verbose)
+    message = ('Current limit number: %d' % self.current)
+    self.logger.LogOutput(message, print_to_console=verbose)
+    return self.current
+
+  def SetStatus(self, status):
+    """Set lo/hi status based on test script result
+
+    If status == 0, it means that runtime error is not introduced until current
+    pass/transformation, so we need to increase lower bound for binary search.
+
+    If status == 1, it means that runtime error still happens with current pass/
+    transformation, so we need to decrease upper bound for binary search.
+
+    Return:
+      True if we find the bad pass/transformation, or cannot find bad one after
+      decreasing to the first pass/transformation. Otherwise False.
+    """
+    assert status == 0 or status == 1 or status == 125
+
+    if self.current == 0:
+      message = ('Runtime error occurs before first pass/transformation. '
+                 'Stop binary searching.')
+      self.logger.LogOutput(message, print_to_console=verbose)
+      return True
+
+    if status == 0:
+      message = ('Runtime error is not reproduced, increasing lower bound.')
+      self.logger.LogOutput(message, print_to_console=verbose)
+      self.lo = self.current + 1
+    elif status == 1:
+      message = ('Runtime error is reproduced, decreasing upper bound..')
+      self.logger.LogOutput(message, print_to_console=verbose)
+      self.hi = self.current
+
+    if self.lo >= self.hi:
+      return True
+
+    return False
+
+
 class BinarySearcher(object):
   """Class of binary searcher."""
 
@@ -167,9 +231,8 @@
       self.GetNextFlakyBinary()
 
     # TODO: Add an estimated time remaining as well.
-    message = ('Estimated tries: min: %d max: %d\n' %
-               (1 + math.log(self.hi - self.lo, 2),
-                self.hi - self.lo - len(self.skipped_indices)))
+    message = ('Estimated tries: min: %d max: %d\n' % (1 + math.log(
+        self.hi - self.lo, 2), self.hi - self.lo - len(self.skipped_indices)))
     self.logger.LogOutput(message, print_to_console=verbose)
     message = ('lo: %d hi: %d current: %d version: %s\n' %
                (self.lo, self.hi, self.current, self.sorted_list[self.current]))
@@ -186,8 +249,8 @@
   def GetAllPoints(self):
     to_return = ''
     for i in range(len(self.sorted_list)):
-      to_return += ('%d %d %s\n' % (self.points[i].status, i,
-                                    self.points[i].revision))
+      to_return += (
+          '%d %d %s\n' % (self.points[i].status, i, self.points[i].revision))
 
     return to_return
 
@@ -276,8 +339,9 @@
     command = 'cd %s && g4 changes ...' % self.checkout_dir
     _, out, _ = self.ce.RunCommandWOutput(command)
     self.changes = re.findall(r'Change (\d+)', out)
-    change_infos = re.findall(r'Change (\d+) on ([\d/]+) by '
-                              r"([^\s]+) ('[^']*')", out)
+    change_infos = re.findall(
+        r'Change (\d+) on ([\d/]+) by '
+        r"([^\s]+) ('[^']*')", out)
     for change_info in change_infos:
       ri = RevisionInfo(change_info[1], change_info[2], change_info[3])
       self.rim[change_info[0]] = ri
@@ -330,9 +394,8 @@
       else:
         to_return += ('%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n' %
                       (change, ri.status, ri.date, ri.client, ri.description,
-                       self.job_log_root + change + '.cmd',
-                       self.job_log_root + change + '.out',
-                       self.job_log_root + change + '.err'))
+                       self.job_log_root + change + '.cmd', self.job_log_root +
+                       change + '.out', self.job_log_root + change + '.err'))
     return to_return
 
 
@@ -367,9 +430,9 @@
 
     self.CleanupCLs()
     # Change the revision of only the gcc part of the toolchain.
-    command = ('cd %s/gcctools/google_vendor_src_branch/gcc '
-               '&& g4 revert ...; g4 sync @%s' % (self.checkout_dir,
-                                                  current_revision))
+    command = (
+        'cd %s/gcctools/google_vendor_src_branch/gcc '
+        '&& g4 revert ...; g4 sync @%s' % (self.checkout_dir, current_revision))
     self.current_ce.RunCommand(command)
 
     self.HandleBrokenCLs(current_revision)
@@ -427,8 +490,8 @@
       ce = command_executer.GetCommandExecuter()
       command = '%s %s' % (script, p4gccbs.checkout_dir)
       status = ce.RunCommand(command)
-      message = ('Revision: %s produced: %d status\n' % (current_revision,
-                                                         status))
+      message = (
+          'Revision: %s produced: %d status\n' % (current_revision, status))
       logger.GetLogger().LogOutput(message, print_to_console=verbose)
       terminated = p4gccbs.SetStatus(status)
       num_tries -= 1
diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py
index 1906525..0d5810c 100755
--- a/binary_search_tool/binary_search_state.py
+++ b/binary_search_tool/binary_search_state.py
@@ -1,4 +1,8 @@
 #!/usr/bin/env python2
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 """The binary search wrapper."""
 
 from __future__ import print_function
@@ -9,6 +13,7 @@
 import math
 import os
 import pickle
+import re
 import sys
 import tempfile
 import time
@@ -21,6 +26,7 @@
 from cros_utils import logger
 
 import binary_search_perforce
+import pass_mapping
 
 GOOD_SET_VAR = 'BISECT_GOOD_SET'
 BAD_SET_VAR = 'BISECT_BAD_SET'
@@ -63,8 +69,8 @@
   """The binary search state class."""
 
   def __init__(self, get_initial_items, switch_to_good, switch_to_bad,
-               test_setup_script, test_script, incremental, prune, iterations,
-               prune_iterations, verify, file_args, verbose):
+               test_setup_script, test_script, incremental, prune, pass_bisect,
+               iterations, prune_iterations, verify, file_args, verbose):
     """BinarySearchState constructor, see Run for full args documentation."""
     self.get_initial_items = get_initial_items
     self.switch_to_good = switch_to_good
@@ -73,6 +79,7 @@
     self.test_script = test_script
     self.incremental = incremental
     self.prune = prune
+    self.pass_bisect = pass_bisect
     self.iterations = iterations
     self.prune_iterations = prune_iterations
     self.verify = verify
@@ -87,6 +94,8 @@
     self.search_cycles = 0
     self.binary_search = None
     self.all_items = None
+    self.cmd_script = None
+    self.mode = None
     self.PopulateItemsUsingCommand(self.get_initial_items)
     self.currently_good_items = set([])
     self.currently_bad_items = set([])
@@ -182,6 +191,17 @@
     ret, _, _ = self.ce.RunCommandWExceptionCleanup(command)
     return ret
 
+  def GenerateBadCommandScript(self, bad_items):
+    """Generate command line script for building bad item."""
+    assert not self.prune, 'Prune must be false if pass_bisect is set.'
+    assert len(bad_items) == 1, 'Pruning is off, but number of bad ' \
+                                       'items found was not 1.'
+    item = list(bad_items)[0]
+    command = '%s %s' % (self.pass_bisect, item)
+    ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+        command, print_to_console=self.verbose)
+    return ret
+
   def DoVerify(self):
     """Verify correctness of test environment.
 
@@ -216,14 +236,14 @@
         status = self.TestScript()
       assert status == 1, 'When reset_to_bad, status should be 1.'
 
-  def DoSearch(self):
+  def DoSearchBadItems(self):
     """Perform full search for bad items.
 
     Perform full search until prune_iterations number of bad items are found.
     """
     while (True and len(self.all_items) > 1 and
            self.prune_cycles < self.prune_iterations):
-      terminated = self.DoBinarySearch()
+      terminated = self.DoBinarySearchBadItems()
       self.prune_cycles += 1
       if not terminated:
         break
@@ -234,6 +254,7 @@
       if prune_index == len(self.all_items) - 1:
         self.l.LogOutput('First bad item is the last item. Breaking.')
         self.l.LogOutput('Bad items are: %s' % self.all_items[-1])
+        self.found_items.add(self.all_items[-1])
         break
 
       # If already seen item we have no new bad items to find, finish up
@@ -270,7 +291,18 @@
       # FIXME: Do we need to Convert the currently good items to bad
       self.PopulateItemsUsingList(new_all_items)
 
-  def DoBinarySearch(self):
+    # If pass level bisecting is set, generate a script which contains command
+    # line options to rebuild bad item.
+    if self.pass_bisect:
+      status = self.GenerateBadCommandScript(self.found_items)
+      if status == 0:
+        self.cmd_script = os.path.join(
+            os.path.dirname(self.pass_bisect), 'cmd_script.sh')
+        self.l.LogOutput('Command script generated at %s.' % self.cmd_script)
+      else:
+        raise RuntimeError('Error while generating command script.')
+
+  def DoBinarySearchBadItems(self):
     """Perform single iteration of binary search."""
     # If in resume mode don't reset search_cycles
     if not self.resumed:
@@ -281,7 +313,7 @@
     terminated = False
     while self.search_cycles < self.iterations and not terminated:
       self.SaveState()
-      self.OutputIterationProgress()
+      self.OutputIterationProgressBadItem()
 
       self.search_cycles += 1
       [bad_items, good_items] = self.GetNextItems()
@@ -302,6 +334,199 @@
     self.l.LogOutput(str(self), print_to_console=self.verbose)
     return terminated
 
+  def CollectPassName(self, pass_info):
+    """Mapping opt-bisect output of pass info to debugcounter name."""
+    self.l.LogOutput('Pass info: %s' % pass_info, print_to_console=self.verbose)
+
+    for desc in pass_mapping.pass_name:
+      if desc in pass_info:
+        return pass_mapping.pass_name[desc]
+
+    # If pass not found, return None
+    return None
+
+  def BuildWithPassLimit(self, limit):
+    """ Rebuild bad item with pass level bisect limit
+
+    Run command line script generated by GenerateBadCommandScript(), with
+    pass level limit flags.
+
+    Return:
+      pass_num: current number of the pass, or total number of passes if
+                limit set to -1.
+      pass_name: The debugcounter name of current limit pass.
+    """
+    os.environ['LIMIT_FLAGS'] = '-mllvm -opt-bisect-limit=' + str(limit)
+    self.l.LogOutput(
+        'Limit flags: %s' % os.environ['LIMIT_FLAGS'],
+        print_to_console=self.verbose)
+    command = self.cmd_script
+    _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+
+    # Messages we get will be like this:
+    #   BISECT: running pass (9) <Pass Description> on <function> (<file>)
+    #   BISECT: running pass (10) <Pass Description> on <module> (<file>)
+    #   BISECT: NOT running pass (11) <Pass Description> on <SCG> (<file>)
+    #   BISECT: NOT running pass (12) <Pass Description> on <SCG> (<file>)
+    # We want to get the pass description of last running pass, to have
+    # transformation level bisect on it.
+    if 'BISECT: ' not in msg:
+      raise RuntimeError('No bisect info printed, OptBisect may not be '
+                         'supported by the compiler.')
+
+    lines = msg.split('\n')
+    pass_num = 0
+    last_pass = ''
+    for l in lines:
+      if 'running pass' in l:
+        # For situation of limit==-1, we want the total number of passes
+        if limit != -1 and 'BISECT: NOT ' in l:
+          break
+        pass_num += 1
+        last_pass = l
+    if limit != -1 and pass_num != limit:
+      raise ValueError('[Error] While building, limit number does not match.')
+    return pass_num, self.CollectPassName(last_pass)
+
+  def BuildWithTransformLimit(self, limit, pass_name=None, pass_limit=-1):
+    """ Rebuild bad item with transformation level bisect limit
+
+    Run command line script generated by GenerateBadCommandScript(), with
+    pass level limit flags and transformation level limit flags.
+
+    Args:
+      limit: transformation level limit for bad item
+      pass_name: name of bad pass debugcounter from pass level bisect result
+      pass_limit: pass level limit from pass level bisect result
+    Return: Total number of transformations if limit set to -1, else return 0.
+    """
+    counter_name = pass_name
+
+    os.environ['LIMIT_FLAGS'] = '-mllvm -opt-bisect-limit=' + \
+                                str(pass_limit) + \
+                                ' -mllvm -debug-counter=' + counter_name + \
+                                '-count=' + str(limit) + \
+                                ' -mllvm -print-debug-counter'
+    self.l.LogOutput(
+        'Limit flags: %s' % os.environ['LIMIT_FLAGS'],
+        print_to_console=self.verbose)
+    command = self.cmd_script
+    _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+
+    if 'Counters and values:' not in msg:
+      raise RuntimeError('No bisect info printed, DebugCounter may not be '
+                         'supported by the compiler.')
+
+    # With debugcounter enabled, there will be DebugCounter counting info in
+    # the output.
+    lines = msg.split('\n')
+    for l in lines:
+      if pass_name in l:
+        # Output of debugcounter will be like:
+        #   instcombine-visit: {10, 0, 20}
+        #   dce-transform: {1, 0, -1}
+        # which indicates {Count, Skip, StopAfter}.
+        # The last number should be the limit we set.
+        # We want the first number as the total transformation count.
+        # Split each line by ,|{|} and we can get l_list as:
+        #   ['instcombine: ', '10', '0', '20', '']
+        # and we will need the second item in it.
+        l_list = re.split(',|{|}', l)
+        count = int(l_list[1])
+        if limit == -1:
+          return count
+    # The returned value is only useful when limit == -1, which shows total
+    # transformation count.
+    return 0
+
+  def DoSearchBadPass(self):
+    """Perform full search for bad pass of bad item."""
+    logger.GetLogger().LogOutput('Starting to bisect bad pass for bad item.')
+
+    # Pass level bisection
+    self.mode = 'pass'
+    self.binary_search = binary_search_perforce.BinarySearcherForPass(
+        logger_to_set=self.l)
+    self.binary_search.total, _ = self.BuildWithPassLimit(-1)
+    logger.GetLogger().LogOutput(
+        'Total %s number: %d' % (self.mode, self.binary_search.total))
+
+    pass_index, pass_name = self.DoBinarySearchBadPass()
+
+    if (not pass_name and pass_index == 0):
+      raise ValueError('Bisecting passes cannot reproduce good result.')
+    logger.GetLogger().LogOutput('Bad pass found: %s.' % pass_name)
+
+    # Transformation level bisection.
+    logger.GetLogger().LogOutput('Starting to bisect at transformation level.')
+
+    self.mode = 'transform'
+    self.binary_search = binary_search_perforce.BinarySearcherForPass(
+        logger_to_set=self.l)
+    self.binary_search.total = self.BuildWithTransformLimit(
+        -1, pass_name, pass_index)
+    logger.GetLogger().LogOutput(
+        'Total %s number: %d' % (self.mode, self.binary_search.total))
+
+    trans_index, _ = self.DoBinarySearchBadPass(pass_index, pass_name)
+    if (trans_index == 0):
+      raise ValueError('Bisecting %s cannot reproduce good result.' % pass_name)
+    logger.GetLogger().LogOutput(
+        'Bisection result for bad item %s:\n'
+        'Bad pass: %s at number %d\n'
+        'Bad transformation number: %d' % (self.found_items, pass_name,
+                                           pass_index, trans_index))
+
+  def DoBinarySearchBadPass(self, pass_index=-1, pass_name=None):
+    """Perform single iteration of binary search at pass level
+
+    Args:
+      pass_index: Works for transformation level bisection, indicates the limit
+        number of pass from pass level bisecting result.
+      pass_name: Works for transformation level bisection, indicates
+        DebugCounter name of the bad pass from pass level bisecting result.
+    Return:
+      index: Index of problematic pass/transformation.
+      pass_name: Works for pass level bisection, returns DebugCounter name for
+        bad pass.
+    """
+    # If in resume mode don't reset search_cycles
+    if not self.resumed:
+      self.search_cycles = 0
+    else:
+      self.resumed = False
+
+    terminated = False
+    index = 0
+    while self.search_cycles < self.iterations and not terminated:
+      self.SaveState()
+      self.OutputIterationProgressBadPass()
+
+      self.search_cycles += 1
+      current = self.binary_search.GetNext()
+
+      if self.mode == 'pass':
+        index, pass_name = self.BuildWithPassLimit(current)
+      else:
+        self.BuildWithTransformLimit(current, pass_name, pass_index)
+        index = current
+
+      # TODO: Newly generated object should not directly replace original
+      # one, need to put it somewhere and symbol link original one to it.
+      # Will update cmd_script to do it.
+
+      status = self.TestSetupScript()
+      assert status == 0, 'Test setup should succeed.'
+      status = self.TestScript()
+      terminated = self.binary_search.SetStatus(status)
+
+      if terminated:
+        self.l.LogOutput('Terminated!', print_to_console=self.verbose)
+    if not terminated:
+      self.l.LogOutput('Ran out of iterations searching...')
+    self.l.LogOutput(str(self), print_to_console=self.verbose)
+    return index, pass_name
+
   def PopulateItemsUsingCommand(self, command):
     """Update all_items and binary search logic from executable.
 
@@ -434,15 +659,21 @@
     progress = progress % (self.ElapsedTimeString(), progress_text)
     self.l.LogOutput(progress)
 
-  def OutputIterationProgress(self):
+  def OutputIterationProgressBadItem(self):
     out = ('Search %d of estimated %d.\n'
            'Prune %d of max %d.\n'
            'Current bad items found:\n'
            '%s\n')
     out = out % (self.search_cycles + 1,
-                 math.ceil(math.log(len(self.all_items), 2)),
-                 self.prune_cycles + 1, self.prune_iterations,
-                 ', '.join(self.found_items))
+                 math.ceil(math.log(len(self.all_items), 2)), self.prune_cycles
+                 + 1, self.prune_iterations, ', '.join(self.found_items))
+    self._OutputProgress(out)
+
+  def OutputIterationProgressBadPass(self):
+    out = ('Search %d of estimated %d.\n' 'Current limit: %s\n')
+    out = out % (self.search_cycles + 1,
+                 math.ceil(math.log(self.binary_search.total, 2)),
+                 self.binary_search.current)
     self._OutputProgress(out)
 
   def __str__(self):
@@ -467,6 +698,7 @@
         'test_script': None,
         'incremental': True,
         'prune': False,
+        'pass_bisect': None,
         'iterations': 50,
         'prune_iterations': 100,
         'verify': True,
@@ -498,33 +730,41 @@
         test_setup_script=None,
         iterations=50,
         prune=False,
+        pass_bisect=None,
         noincremental=False,
         file_args=False,
         verify=True,
         prune_iterations=100,
         verbose=False,
         resume=False):
-  """Run binary search tool. Equivalent to running through terminal.
+  """Run binary search tool.
+
+  Equivalent to running through terminal.
 
   Args:
     get_initial_items: Script to enumerate all items being binary searched
     switch_to_good: Script that will take items as input and switch them to good
-                    set
+      set
     switch_to_bad: Script that will take items as input and switch them to bad
-                   set
+      set
     test_script: Script that will determine if the current combination of good
-                 and bad items make a "good" or "bad" result.
+      and bad items make a "good" or "bad" result.
     test_setup_script: Script to do necessary setup (building, compilation,
-                       etc.) for test_script.
+      etc.) for test_script.
     iterations: How many binary search iterations to run before exiting.
     prune: If False the binary search tool will stop when the first bad item is
-           found. Otherwise then binary search tool will continue searching
-           until all bad items are found (or prune_iterations is reached).
+      found. Otherwise then binary search tool will continue searching until all
+      bad items are found (or prune_iterations is reached).
+    pass_bisect: Script that takes single bad item from POPULATE_BAD and returns
+      the compiler command used to generate the bad item. This will turn on
+      pass/ transformation level bisection for the bad item. Requires that
+      'prune' be set to False, and needs support of `-opt-bisect-limit`(pass)
+      and `-print-debug-counter`(transformation) from LLVM.
     noincremental: Whether to send "diffs" of good/bad items to switch scripts.
     file_args: If True then arguments to switch scripts will be a file name
-               containing a newline separated list of the items to switch.
-    verify: If True, run tests to ensure initial good/bad sets actually
-            produce a good/bad result.
+      containing a newline separated list of the items to switch.
+    verify: If True, run tests to ensure initial good/bad sets actually produce
+      a good/bad result.
     prune_iterations: Max number of bad items to search for.
     verbose: If True will print extra debug information to user.
     resume: If True will resume using STATE_FILE.
@@ -532,18 +772,36 @@
   Returns:
     0 for success, error otherwise
   """
+  # Notice that all the argument checks are in the Run() function rather than
+  # in the Main() function. It is not common to do so but some wrappers are
+  # going to call Run() directly and bypass checks in Main() function.
   if resume:
+    logger.GetLogger().LogOutput('Resuming from %s' % STATE_FILE)
     bss = BinarySearchState.LoadState()
     if not bss:
       logger.GetLogger().LogOutput(
           '%s is not a valid binary_search_tool state file, cannot resume!' %
           STATE_FILE)
       return 1
+    logger.GetLogger().LogOutput('Note: resuming from previous state, '
+                                 'ignoring given options and loading saved '
+                                 'options instead.')
   else:
+    if not (get_initial_items and switch_to_good and switch_to_bad and
+            test_script):
+      logger.GetLogger().LogOutput('The following options are required: '
+                                   '[-i, -g, -b, -t] | [-r]')
+      return 1
+    if pass_bisect and prune:
+      logger.GetLogger().LogOutput('"--pass_bisect" only works when '
+                                   '"--prune" is set to be False.')
+      return 1
     switch_to_good = _CanonicalizeScript(switch_to_good)
     switch_to_bad = _CanonicalizeScript(switch_to_bad)
     if test_setup_script:
       test_setup_script = _CanonicalizeScript(test_setup_script)
+    if pass_bisect:
+      pass_bisect = _CanonicalizeScript(pass_bisect)
     test_script = _CanonicalizeScript(test_script)
     get_initial_items = _CanonicalizeScript(get_initial_items)
     incremental = not noincremental
@@ -552,12 +810,14 @@
 
     bss = BinarySearchState(get_initial_items, switch_to_good, switch_to_bad,
                             test_setup_script, test_script, incremental, prune,
-                            iterations, prune_iterations, verify, file_args,
-                            verbose)
+                            pass_bisect, iterations, prune_iterations, verify,
+                            file_args, verbose)
     bss.DoVerify()
 
   try:
-    bss.DoSearch()
+    bss.DoSearchBadItems()
+    if pass_bisect:
+      bss.DoSearchBadPass()
     bss.RemoveState()
     logger.GetLogger().LogOutput(
         'Total execution time: %s' % bss.ElapsedTimeString())
@@ -577,18 +837,6 @@
   logger.GetLogger().LogOutput(' '.join(argv))
   options = parser.parse_args(argv)
 
-  if not (options.get_initial_items and options.switch_to_good and
-          options.switch_to_bad and options.test_script) and not options.resume:
-    parser.print_help()
-    return 1
-
-  if options.resume:
-    logger.GetLogger().LogOutput('Resuming from %s' % STATE_FILE)
-    if len(argv) > 1:
-      logger.GetLogger().LogOutput(('Note: resuming from previous state, '
-                                    'ignoring given options and loading saved '
-                                    'options instead.'))
-
   # Get dictionary of all options
   args = vars(options)
   return Run(**args)
diff --git a/binary_search_tool/bisect_driver.py b/binary_search_tool/bisect_driver.py
index 0b3fb1d..21dd11f 100644
--- a/binary_search_tool/bisect_driver.py
+++ b/binary_search_tool/bisect_driver.py
@@ -1,8 +1,8 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google Inc. All Rights Reserved.
 #
-# This script is used to help the compiler wrapper in the Android build system
-# bisect for bad object files.
-"""Utilities for bisection of Android object files.
+# This script is used to help the compiler wrapper in the ChromeOS and
+# Android build systems bisect for bad object files.
+"""Utilities for bisection of ChromeOS and Android object files.
 
 This module contains a set of utilities to allow bisection between
 two sets (good and bad) of object files. Mostly used to find compiler
@@ -24,7 +24,7 @@
 import subprocess
 import sys
 
-VALID_MODES = ['POPULATE_GOOD', 'POPULATE_BAD', 'TRIAGE']
+VALID_MODES = ('POPULATE_GOOD', 'POPULATE_BAD', 'TRIAGE')
 GOOD_CACHE = 'good'
 BAD_CACHE = 'bad'
 LIST_FILE = os.path.join(GOOD_CACHE, '_LIST')
@@ -35,7 +35,6 @@
 
 class Error(Exception):
   """The general compiler wrapper error class."""
-  pass
 
 
 @contextlib.contextmanager
@@ -58,7 +57,13 @@
     mode: mode to open file with ('w', 'r', etc.)
   """
   with open(path, mode) as f:
-    # Share the lock if just reading, make lock exclusive if writing
+    # Apply FD_CLOEXEC argument to fd. This ensures that the file descriptor
+    # won't be leaked to any child processes.
+    current_args = fcntl.fcntl(f.fileno(), fcntl.F_GETFD)
+    fcntl.fcntl(f.fileno(), fcntl.F_SETFD, current_args | fcntl.FD_CLOEXEC)
+
+    # Reads can share the lock as no race conditions exist. If write is needed,
+    # give writing process exclusive access to the file.
     if f.mode == 'r' or f.mode == 'rb':
       lock_type = fcntl.LOCK_SH
     else:
@@ -68,8 +73,6 @@
       fcntl.lockf(f, lock_type)
       yield f
       f.flush()
-    except:
-      raise
     finally:
       fcntl.lockf(f, fcntl.LOCK_UN)
 
@@ -103,8 +106,7 @@
   determine where an object file should be linked from (good or bad).
   """
   bad_set_file = os.environ.get('BISECT_BAD_SET')
-  ret = subprocess.call(['grep', '-x', '-q', obj_file, bad_set_file])
-  if ret == 0:
+  if in_object_list(obj_file, bad_set_file):
     return BAD_CACHE
   else:
     return GOOD_CACHE
@@ -124,18 +126,29 @@
 
   Returns:
     Absolute object path from execution args (-o argument). If no object being
-    outputted or output doesn't end in ".o" then return empty string.
+    outputted, then return empty string. -o argument is checked only if -c is
+    also present.
   """
   try:
     i = execargs.index('-o')
+    _ = execargs.index('-c')
   except ValueError:
     return ''
 
   obj_path = execargs[i + 1]
-  if not obj_path.endswith(('.o',)):
-    # TODO: what suffixes do we need to contemplate
-    # TODO: add this as a warning
-    # TODO: need to handle -r compilations
+  # Ignore args that do not create a file.
+  if obj_path in (
+      '-',
+      '/dev/null',):
+    return ''
+  # Ignore files ending in .tmp.
+  if obj_path.endswith(('.tmp',)):
+    return ''
+  # Ignore configuration files generated by Automake/Autoconf/CMake etc.
+  if (obj_path.endswith('conftest.o') or
+      obj_path.endswith('CMakeFiles/test.o') or
+      obj_path.find('CMakeTmp') != -1 or
+      os.path.abspath(obj_path).find('CMakeTmp') != -1):
     return ''
 
   return os.path.abspath(obj_path)
@@ -151,7 +164,7 @@
   if '-MD' not in execargs and '-MMD' not in execargs:
     return ''
 
-  # If -MF given this is the path of the dependency file. Otherwise the
+  # If -MF is given this is the path of the dependency file. Otherwise the
   # dependency file is the value of -o but with a .d extension
   if '-MF' in execargs:
     i = execargs.index('-MF')
@@ -217,7 +230,16 @@
 
 
 def cache_file(execargs, bisect_dir, cache, abs_file_path):
-  """Cache compiler output file (.o/.d/.dwo)."""
+  """Cache compiler output file (.o/.d/.dwo).
+
+  Args:
+    execargs: compiler execution arguments.
+    bisect_dir: The directory where bisection caches live.
+    cache: Which cache the file will be cached to (GOOD/BAD).
+    abs_file_path: Absolute path to file being cached.
+  Returns:
+    True if caching was successful, False otherwise.
+  """
   # os.path.join fails with absolute paths, use + instead
   bisect_path = os.path.join(bisect_dir, cache) + abs_file_path
   bisect_path_dir = os.path.dirname(bisect_path)
@@ -227,14 +249,36 @@
 
   try:
     if os.path.exists(abs_file_path):
+      if os.path.exists(bisect_path):
+        # File exists
+        population_dir = os.path.join(bisect_dir, cache)
+        with lock_file(os.path.join(population_dir, '_DUPS'),
+                       'a') as dup_object_list:
+          dup_object_list.write('%s\n' % abs_file_path)
+        raise Exception(
+            'Trying to cache file %s multiple times.' % abs_file_path)
+
       shutil.copy2(abs_file_path, bisect_path)
+      # Set cache object to be read-only so later compilations can't
+      # accidentally overwrite it.
+      os.chmod(bisect_path, 0o444)
+      return True
+    else:
+      # File not found (happens when compilation fails but error code is still 0)
+      return False
   except Exception:
     print('Could not cache file %s' % abs_file_path, file=sys.stderr)
     raise
 
 
 def restore_file(bisect_dir, cache, abs_file_path):
-  """Restore file from cache (.o/.d/.dwo)."""
+  """Restore file from cache (.o/.d/.dwo).
+
+  Args:
+    bisect_dir: The directory where bisection caches live.
+    cache: Which cache the file will be restored from (GOOD/BAD).
+    abs_file_path: Absolute path to file being restored.
+  """
   # os.path.join fails with absolute paths, use + instead
   cached_path = os.path.join(bisect_dir, cache) + abs_file_path
   if os.path.exists(cached_path):
@@ -264,21 +308,41 @@
     return retval
 
   full_obj_path = get_obj_path(execargs)
-  # If not a normal compiler call then just exit
+  # This is not a normal compiler call because it doesn't have a -o argument,
+  # or the -o argument has an unusable output file.
+  # It's likely that this compiler call was actually made to invoke the linker,
+  # or as part of a configuration test. In this case we want to simply call the
+  # compiler and return.
   if not full_obj_path:
-    return
+    return retval
 
-  cache_file(execargs, bisect_dir, population_name, full_obj_path)
+  # Return if not able to cache the object file
+  if not cache_file(execargs, bisect_dir, population_name, full_obj_path):
+    return retval
 
   population_dir = os.path.join(bisect_dir, population_name)
   with lock_file(os.path.join(population_dir, '_LIST'), 'a') as object_list:
     object_list.write('%s\n' % full_obj_path)
 
   for side_effect in get_side_effects(execargs):
-    cache_file(execargs, bisect_dir, population_name, side_effect)
+    _ = cache_file(execargs, bisect_dir, population_name, side_effect)
+
+  return retval
 
 
 def bisect_triage(execargs, bisect_dir):
+  """Use object file from the appropriate cache (good/bad).
+
+  Given a populated bisection directory, use the object file saved
+  into one of the caches (good/bad) according to what is specified
+  in the good/bad sets. The good/bad sets are generated by the
+  high level binary search tool. Additionally restore any possible
+  side effects of compiler.
+
+  Args:
+    execargs: compiler execution arguments.
+    bisect_dir: populated bisection directory.
+  """
   full_obj_path = get_obj_path(execargs)
   obj_list = os.path.join(bisect_dir, LIST_FILE)
 
@@ -309,7 +373,7 @@
       return retval
     os.remove(full_obj_path)
     restore_file(bisect_dir, cache, full_obj_path)
-    return
+    return retval
 
   # Generate compiler side effects. Trick Make into thinking compiler was
   # actually executed.
@@ -321,14 +385,16 @@
   if not os.path.exists(full_obj_path):
     restore_file(bisect_dir, cache, full_obj_path)
 
+  return 0
+
 
 def bisect_driver(bisect_stage, bisect_dir, execargs):
   """Call appropriate bisection stage according to value in bisect_stage."""
   if bisect_stage == 'POPULATE_GOOD':
-    bisect_populate(execargs, bisect_dir, GOOD_CACHE)
+    return bisect_populate(execargs, bisect_dir, GOOD_CACHE)
   elif bisect_stage == 'POPULATE_BAD':
-    bisect_populate(execargs, bisect_dir, BAD_CACHE)
+    return bisect_populate(execargs, bisect_dir, BAD_CACHE)
   elif bisect_stage == 'TRIAGE':
-    bisect_triage(execargs, bisect_dir)
+    return bisect_triage(execargs, bisect_dir)
   else:
     raise ValueError('wrong value for BISECT_STAGE: %s' % bisect_stage)
diff --git a/binary_search_tool/common.py b/binary_search_tool/common.py
index 945270b..2850801 100644
--- a/binary_search_tool/common.py
+++ b/binary_search_tool/common.py
@@ -1,3 +1,7 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
 """Common config and logic for binary search tool
 
 This module serves two main purposes:
@@ -142,35 +146,35 @@
       '-i',
       '--get_initial_items',
       dest='get_initial_items',
-      help=('Script to run to get the initial objects. '
-            'If your script requires user input '
-            'the --verbose option must be used'))
+      help='Script to run to get the initial objects. '
+           'If your script requires user input '
+           'the --verbose option must be used')
   args.AddArgument(
       '-g',
       '--switch_to_good',
       dest='switch_to_good',
-      help=('Script to run to switch to good. '
-            'If your switch script requires user input '
-            'the --verbose option must be used'))
+      help='Script to run to switch to good. '
+           'If your switch script requires user input '
+           'the --verbose option must be used')
   args.AddArgument(
       '-b',
       '--switch_to_bad',
       dest='switch_to_bad',
-      help=('Script to run to switch to bad. '
-            'If your switch script requires user input '
-            'the --verbose option must be used'))
+      help='Script to run to switch to bad. '
+           'If your switch script requires user input '
+           'the --verbose option must be used')
   args.AddArgument(
       '-I',
       '--test_setup_script',
       dest='test_setup_script',
-      help=('Optional script to perform building, flashing, '
-            'and other setup before the test script runs.'))
+      help='Optional script to perform building, flashing, '
+           'and other setup before the test script runs.')
   args.AddArgument(
       '-t',
       '--test_script',
       dest='test_script',
-      help=('Script to run to test the '
-            'output after packages are built.'))
+      help='Script to run to test the '
+           'output after packages are built.')
   # No input (evals to False),
   # --prune (evals to True),
   # --prune=False,
@@ -184,8 +188,20 @@
       default=False,
       type=StrToBool,
       metavar='bool',
-      help=('If True, continue until all bad items are found. '
-            'Defaults to False.'))
+      help='If True, continue until all bad items are found. '
+            'Defaults to False.')
+  args.AddArgument(
+      '-P',
+      '--pass_bisect',
+      dest='pass_bisect',
+      default=None,
+      help='Script to generate another script for pass level bisect, '
+           'which contains command line options to build bad item. '
+           'This will also turn on pass/transformation level bisection. '
+           'Needs support of `-opt-bisect-limit`(pass) and '
+           '`-print-debug-counter`(transformation) from LLVM. '
+           'For now it only supports one single bad item, so to use it, '
+           'prune must be set to False.')
   # No input (evals to False),
   # --noincremental (evals to True),
   # --noincremental=False,
@@ -199,8 +215,8 @@
       default=False,
       type=StrToBool,
       metavar='bool',
-      help=('If True, don\'t propagate good/bad changes '
-            'incrementally. Defaults to False.'))
+      help='If True, don\'t propagate good/bad changes '
+           'incrementally. Defaults to False.')
   # No input (evals to False),
   # --file_args (evals to True),
   # --file_args=False,
@@ -214,8 +230,8 @@
       default=False,
       type=StrToBool,
       metavar='bool',
-      help=('Whether to use a file to pass arguments to scripts. '
-            'Defaults to False.'))
+      help='Whether to use a file to pass arguments to scripts. '
+           'Defaults to False.')
   # No input (evals to True),
   # --verify (evals to True),
   # --verify=False,
@@ -228,8 +244,8 @@
       default=True,
       type=StrToBool,
       metavar='bool',
-      help=('Whether to run verify iterations before searching. '
-            'Defaults to True.'))
+      help='Whether to run verify iterations before searching. '
+           'Defaults to True.')
   args.AddArgument(
       '-N',
       '--prune_iterations',
@@ -256,6 +272,6 @@
       '--resume',
       dest='resume',
       action='store_true',
-      help=('Resume bisection tool execution from state file.'
-            'Useful if the last bisection was terminated '
-            'before it could properly finish.'))
+      help='Resume bisection tool execution from state file.'
+           'Useful if the last bisection was terminated '
+           'before it could properly finish.')
diff --git a/binary_search_tool/pass_mapping.py b/binary_search_tool/pass_mapping.py
new file mode 100644
index 0000000..cb80910
--- /dev/null
+++ b/binary_search_tool/pass_mapping.py
@@ -0,0 +1,32 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Config file for pass level bisection
+
+Provides a mapping from pass info from -opt-bisect result to DebugCounter name.
+"""
+pass_name = {
+    # The list now contains all the passes in LLVM that support DebugCounter at
+    # transformation level.
+    # We will need to keep updating this map after more DebugCounter added to
+    # each pass in LLVM.
+    # For users who make local changes to passes, please add a map from pass
+    # description to newly introduced DebugCounter name for transformation
+    # level bisection purpose.
+    'Hoist/decompose integer division and remainder':
+        'div-rem-pairs-transform',
+    'Early CSE':
+        'early-cse',
+    'Falkor HW Prefetch Fix Late Phase':
+        'falkor-hwpf',
+    'Combine redundant instructions':
+        'instcombine-visit',
+    'Machine Copy Propagation Pass':
+        'machine-cp-fwd',
+    'Global Value Numbering':
+        'newgvn-phi',
+    'PredicateInfo Printer':
+        'predicateinfo-rename',
+    'SI Insert Waitcnts':
+        'si-insert-waitcnts-forceexp',
+}
diff --git a/binary_search_tool/sysroot_wrapper/interactive_test_host.sh b/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
new file mode 100755
index 0000000..58adffc
--- /dev/null
+++ b/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
@@ -0,0 +1,25 @@
+#!/bin/bash -u
+#
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# This script is intended to be used by binary_search_state.py, as
+# part of the binary search triage on ChromeOS package and object files for a
+# host package. It waits for the test setup script to build the image, then asks
+# the user if the image is good or not. (Since this is a host package, there is
+# no 'install' phase needed.)  This script should return '0' if the test succeeds
+# (the image is 'good'); '1' if the test fails (the image is 'bad'); and '125'
+# if it could not determine (does not apply in this case).
+#
+
+source common/common.sh
+
+while true; do
+    read -p "Is this a good ChromeOS image?" yn
+    case $yn in
+        [Yy]* ) exit 0;;
+        [Nn]* ) exit 1;;
+        * ) echo "Please answer yes or no.";;
+    esac
+done
+
+exit 125
diff --git a/binary_search_tool/sysroot_wrapper/test_setup_host.sh b/binary_search_tool/sysroot_wrapper/test_setup_host.sh
new file mode 100755
index 0000000..b5169ee
--- /dev/null
+++ b/binary_search_tool/sysroot_wrapper/test_setup_host.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+#
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# This is a generic ChromeOS package/image test setup script. It is meant to
+# be used for either the object file or package bisection tools. This script
+# is intended to be used with host object bisection, to bisect the object
+# files in a host package.  Since it deals with a host package, there is no
+# building an image or flashing a device -- just building the host package
+# itself.
+#
+# This script is intended to be used by binary_search_state.py, as
+# part of the binary search triage on ChromeOS objects and packages. It should
+# return '0' if the setup succeeds; and '1' if the setup fails (the image
+# could not build or be flashed).
+#
+
+export PYTHONUNBUFFERED=1
+
+source common/common.sh
+
+
+if [[ "${BISECT_MODE}" == "OBJECT_MODE" ]]; then
+  echo "EMERGING ${BISECT_PACKAGE}"
+  sudo -E emerge ${BISECT_PACKAGE}
+  emerge_status=$?
+
+  if [[ ${emerge_status} -ne 0 ]] ; then
+    echo "emerging ${BISECT_PACKAGE} returned a non-zero status: $emerge_status"
+    exit 1
+  fi
+
+  exit 0
+fi
+
+
+exit 0
diff --git a/binary_search_tool/test/binary_search_tool_tester.py b/binary_search_tool/test/binary_search_tool_tester.py
index e733d9c..923ea11 100755
--- a/binary_search_tool/test/binary_search_tool_tester.py
+++ b/binary_search_tool/test/binary_search_tool_tester.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python2
 
-# Copyright 2012 Google Inc. All Rights Reserved.
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 """Tests for bisecting tool."""
 
 from __future__ import print_function
@@ -283,7 +285,7 @@
         prune=True,
         file_args=True,
         iterations=1)
-    bss.DoSearch()
+    bss.DoSearchBadItems()
     self.assertFalse(bss.found_items)
 
   def test_no_prune(self):
@@ -295,7 +297,25 @@
         test_setup_script='./test_setup.py',
         prune=False,
         file_args=True)
-    bss.DoSearch()
+    bss.DoSearchBadItems()
+    self.assertEquals(len(bss.found_items), 1)
+
+    bad_objs = common.ReadObjectsFile()
+    found_obj = int(bss.found_items.pop())
+    self.assertEquals(bad_objs[found_obj], 1)
+
+  def test_pass_bisect(self):
+    bss = binary_search_state.MockBinarySearchState(
+        get_initial_items='./gen_init_list.py',
+        switch_to_good='./switch_to_good.py',
+        switch_to_bad='./switch_to_bad.py',
+        pass_bisect='./generate_cmd.py',
+        test_script='./is_good.py',
+        test_setup_script='./test_setup.py',
+        prune=False,
+        file_args=True)
+    # TODO: Need to design unit tests for pass level bisection
+    bss.DoSearchBadItems()
     self.assertEquals(len(bss.found_items), 1)
 
     bad_objs = common.ReadObjectsFile()
diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py
index 5d68689..111068c 100755
--- a/buildbot_test_llvm.py
+++ b/buildbot_test_llvm.py
@@ -1,4 +1,8 @@
 #!/usr/bin/env python2
+#
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 """Script for running llvm validation tests on ChromeOS.
 
 This script launches a buildbot to build ChromeOS with the llvm on
@@ -29,23 +33,59 @@
 MAIL_PROGRAM = '~/var/bin/mail-sheriff'
 VALIDATION_RESULT_DIR = os.path.join(CROSTC_ROOT, 'validation_result')
 START_DATE = datetime.date(2016, 1, 1)
-TEST_PER_DAY = 3
+TEST_PER_DAY = 4
+DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
+
+# Information about Rotating Boards
+#  Board        Arch     Reference    Platform      Kernel
+#                        Board                      Version
+#  ------------ -------  ------------ ------------- -------
+#  cave         x86_64   glados       skylake-y     3.18
+#  daisy        armv7    daisy        exynos-5250   3.8.11
+#  elm          aarch64  oak          mediatek-8173 3.18
+#  fizz         x86_64   fizz         kabylake-u/r  4.4.*
+#  gale         armv7                               3.18
+#  grunt        x86_64   grunt        stoney ridge  4.14.*
+#  guado_moblab x86_64                              3.14
+#  kevin        aarch64  gru          rockchip-3399 4.4.*
+#  lakitu       x86_64                              4.4.*
+#  lars         x86_64   kunimitsu    skylake-u     3.18
+#  link         x86_64   ivybridge    ivybridge     3.8.11
+#  nautilus     x86_64   poppy        kabylake-y    4.4.*
+#  nyan_big     armv7    nyan         tegra         3.10.18
+#  peach_pit    armv7    peach        exynos-5420   3.8.11
+#  peppy        x86_64   slippy       haswell       3.8.11
+#  samus        x86_64   auron        broadwell     3.14
+#  snappy       x86_64   reef         apollo lake   4.4.*
+#  swanky       x86_64   rambi        baytrail      4.4.*
+#  terra        x86_64   strago       braswell      3.18
+#  veyron_jaq   armv7    veyron-pinky rockchip-3288 3.14
+#  whirlwind    armv7                               3.14
+#  zoombini     x86_64   zoombini     cannonlake-y  4.14.*
+
 TEST_BOARD = [
-    'squawks',  # x86_64, rambi  (baytrail)
-    'terra',  # x86_64, strago (braswell)
-    'lulu',  # x86_64, auron  (broadwell)
-    'peach_pit',  # arm,    peach  (exynos-5420)
-    'peppy',  # x86_64, slippy (haswell celeron)
-    'link',  # x86_64, ivybridge (ivybridge)
-    'nyan_big',  # arm,    nyan   (tegra)
-    'sentry',  # x86_64, kunimitsu (skylake-u)
-    'chell',  # x86_64, glados (skylake-y)
-    'daisy',  # arm,    daisy  (exynos)
-    'caroline',  # x86_64, glados (skylake-y)
-    'kevin',  # arm,    gru  (Rockchip)
-    'reef',  # x86_64, reef  (Apollo Lake)
+    'cave',
+    'daisy',
+    # 'elm', tested by arm64-llvm-next-toolchain builder.
+    'fizz',
+    'gale',
+    'grunt',
+    'guado_moblab',
+    'kevin',
     'lakitu',
+    'lars',
+    'link',
+    'nautilus',
+    'nyan_big',
+    'peach_pit',
+    'peppy',
+    # 'samus', tested by amd64-llvm-next-toolchain builder.
+    'snappy',
+    'swanky',
+    'terra',
+    # 'veyron_jaq', tested by arm-llvm-next-toolchain builder.
     'whirlwind',
+    'zoombini',
 ]
 
 
@@ -59,7 +99,7 @@
     self._ce = command_executer.GetCommandExecuter()
     self._l = logger.GetLogger()
     self._compiler = compiler
-    self._build = '%s-%s-toolchain' % (board, compiler)
+    self._build = '%s-%s-toolchain-tryjob' % (board, compiler)
     self._patches = patches.split(',') if patches else []
     self._patches_string = '_'.join(str(p) for p in self._patches)
 
@@ -69,35 +109,30 @@
       self._weekday = weekday
     self._reports = os.path.join(VALIDATION_RESULT_DIR, compiler, board)
 
-  def _FinishSetup(self):
-    """Make sure testing_rsa file is properly set up."""
-    # Fix protections on ssh key
-    command = ('chmod 600 /var/cache/chromeos-cache/distfiles/target'
-               '/chrome-src-internal/src/third_party/chromite/ssh_keys'
-               '/testing_rsa')
-    ret_val = self._ce.ChrootRunCommand(self._chromeos_root, command)
-    if ret_val != 0:
-      raise RuntimeError('chmod for testing_rsa failed')
-
   def DoAll(self):
     """Main function inside ToolchainComparator class.
 
     Launch trybot, get image names, create crosperf experiment file, run
     crosperf, and copy images into seven-day report directories.
     """
-    flags = ['--hwtest']
-    date_str = datetime.date.today()
-    description = 'master_%s_%s_%s' % (self._patches_string, self._build,
-                                       date_str)
-    _ = buildbot_utils.GetTrybotImage(
+    buildbucket_id, _ = buildbot_utils.GetTrybotImage(
         self._chromeos_root,
         self._build,
         self._patches,
-        description,
-        other_flags=flags,
+        tryjob_flags=['--hwtest'],
         async=True)
 
-    return 0
+    return buildbucket_id
+
+
+def WriteRotatingReportsData(results_dict, date):
+  """Write data for waterfall report."""
+  fname = '%d-%02d-%02d.builds' % (date.year, date.month, date.day)
+  filename = os.path.join(DATA_DIR, 'rotating-builders', fname)
+  with open(filename, 'w') as out_file:
+    for board in results_dict.keys():
+      buildbucket_id = results_dict[board]
+      out_file.write('%s,%s\n' % (buildbucket_id, board))
 
 
 def Main(argv):
@@ -148,16 +183,20 @@
   days = delta.days
 
   start_board = (days * TEST_PER_DAY) % len(TEST_BOARD)
+  results_dict = dict()
   for i in range(TEST_PER_DAY):
     try:
       board = TEST_BOARD[(start_board + i) % len(TEST_BOARD)]
       fv = ToolchainVerifier(board, options.chromeos_root, options.weekday,
                              options.patches, options.compiler)
-      fv.DoAll()
+      buildbucket_id = fv.DoAll()
+      if buildbucket_id:
+        results_dict[board] = buildbucket_id
     except SystemExit:
       logfile = os.path.join(VALIDATION_RESULT_DIR, options.compiler, board)
       with open(logfile, 'w') as f:
         f.write('Verifier got an exception, please check the log.\n')
+  WriteRotatingReportsData(results_dict, today)
 
 
 if __name__ == '__main__':
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index 06707be..bf3a8de 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -41,7 +41,7 @@
 IMAGE_DIR = '{board}-{image_type}'
 IMAGE_VERSION_STR = r'{chrome_version}-{tip}\.{branch}\.{branch_branch}'
 IMAGE_FS = IMAGE_DIR + '/' + IMAGE_VERSION_STR
-TRYBOT_IMAGE_FS = 'trybot-' + IMAGE_FS + '-{build_id}'
+TRYBOT_IMAGE_FS = IMAGE_FS + '-{build_id}'
 PFQ_IMAGE_FS = IMAGE_FS + '-rc1'
 IMAGE_RE_GROUPS = {
     'board': r'(?P<board>\S+)',
@@ -71,7 +71,7 @@
     self._base_dir = os.getcwd()
     self._ce = command_executer.GetCommandExecuter()
     self._l = logger.GetLogger()
-    self._build = '%s-release' % board
+    self._build = '%s-release-tryjob' % board
     self._patches = patches.split(',') if patches else []
     self._patches_string = '_'.join(str(p) for p in self._patches)
     self._noschedv2 = noschedv2
@@ -84,19 +84,22 @@
                                            '%Y-%m-%d_%H:%M:%S')
     self._reports_dir = os.path.join(
         NIGHTLY_TESTS_DIR,
-        '%s.%s' % (timestamp, board),)
+        '%s.%s' % (timestamp, board),
+    )
 
   def _GetVanillaImageName(self, trybot_image):
     """Given a trybot artifact name, get latest vanilla image name.
 
     Args:
       trybot_image: artifact name such as
-          'trybot-daisy-release/R40-6394.0.0-b1389'
+          'daisy-release-tryjob/R40-6394.0.0-b1389'
 
     Returns:
       Latest official image name, e.g. 'daisy-release/R57-9089.0.0'.
     """
-    mo = re.search(TRYBOT_IMAGE_RE, trybot_image)
+    # We need to filter out -tryjob in the trybot_image.
+    trybot = re.sub('-tryjob', '', trybot_image)
+    mo = re.search(TRYBOT_IMAGE_RE, trybot)
     assert mo
     dirname = IMAGE_DIR.replace('\\', '').format(**mo.groupdict())
     return buildbot_utils.GetLatestImage(self._chromeos_root, dirname)
@@ -114,13 +117,14 @@
 
     Args:
       trybot_image: artifact name such as
-          'trybot-daisy-release/R40-6394.0.0-b1389'
+          'daisy-release-tryjob/R40-6394.0.0-b1389'
 
     Returns:
       Corresponding chrome PFQ image name, e.g.
       'daisy-chrome-pfq/R40-6393.0.0-rc1'.
     """
-    mo = re.search(TRYBOT_IMAGE_RE, trybot_image)
+    trybot = re.sub('-tryjob', '', trybot_image)
+    mo = re.search(TRYBOT_IMAGE_RE, trybot)
     assert mo
     image_dict = mo.groupdict()
     image_dict['image_type'] = 'chrome-pfq'
@@ -131,16 +135,6 @@
         return nonafdo_image
     return ''
 
-  def _FinishSetup(self):
-    """Make sure testing_rsa file is properly set up."""
-    # Fix protections on ssh key
-    command = ('chmod 600 /var/cache/chromeos-cache/distfiles/target'
-               '/chrome-src-internal/src/third_party/chromite/ssh_keys'
-               '/testing_rsa')
-    ret_val = self._ce.ChrootRunCommand(self._chromeos_root, command)
-    if ret_val != 0:
-      raise RuntimeError('chmod for testing_rsa failed')
-
   def _TestImages(self, trybot_image, vanilla_image, nonafdo_image):
     """Create crosperf experiment file.
 
@@ -165,6 +159,7 @@
     benchmark: all_toolchain_perf {
       suite: telemetry_Crosperf
       iterations: 0
+      run_local: False
     }
 
     benchmark: page_cycler_v2.typical_25 {
@@ -251,23 +246,19 @@
     Launch trybot, get image names, create crosperf experiment file, run
     crosperf, and copy images into seven-day report directories.
     """
-    date_str = datetime.date.today()
-    description = 'master_%s_%s_%s' % (self._patches_string, self._build,
-                                       date_str)
-    build_id, trybot_image = buildbot_utils.GetTrybotImage(
+    buildbucket_id, trybot_image = buildbot_utils.GetTrybotImage(
         self._chromeos_root,
         self._build,
         self._patches,
-        description,
-        other_flags=['--notests'],
+        tryjob_flags=['--notests'],
         build_toolchain=True)
 
     print('trybot_url: \
-       https://uberchromegw.corp.google.com/i/chromiumos.tryserver/builders/release/builds/%s'
-          % build_id)
+          http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=%s'
+          % buildbucket_id)
     if len(trybot_image) == 0:
-      self._l.LogError('Unable to find trybot_image for %s!' % description)
-      return 1
+      self._l.LogError('Unable to find trybot_image!')
+      return 2
 
     vanilla_image = self._GetVanillaImageName(trybot_image)
     nonafdo_image = self._GetNonAFDOImageName(trybot_image)
@@ -276,9 +267,6 @@
     print('vanilla_image: %s' % vanilla_image)
     print('nonafdo_image: %s' % nonafdo_image)
 
-    if os.getlogin() == ROLE_ACCOUNT:
-      self._FinishSetup()
-
     self._TestImages(trybot_image, vanilla_image, nonafdo_image)
     self._SendEmail()
     return 0
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index f89bb71..911ea03 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -1,36 +1,25 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 """Utilities for launching and accessing ChromeOS buildbots."""
 
 from __future__ import print_function
 
-import base64
+import ast
 import json
 import os
 import re
 import time
-import urllib2
-
-# pylint: disable=no-name-in-module
-from oauth2client.service_account import ServiceAccountCredentials
 
 from cros_utils import command_executer
 from cros_utils import logger
-from cros_utils import buildbot_json
 
 INITIAL_SLEEP_TIME = 7200  # 2 hours; wait time before polling buildbot.
 SLEEP_TIME = 600  # 10 minutes; time between polling of buildbot.
-TIME_OUT = 28800  # Decide the build is dead or will never finish
-# after this time (8 hours).
-OK_STATUS = [  # List of result status values that are 'ok'.
-    # This was obtained from:
-    #   https://chromium.googlesource.com/chromium/tools/build/+/
-    #       master/third_party/buildbot_8_4p1/buildbot/status/results.py
-    0,  # "success"
-    1,  # "warnings"
-    6,  # "retry"
-]
+
+# Some of our slower builders (llvm-next) are taking more
+# than 8 hours. So, increase this TIME_OUT to 9 hours.
+TIME_OUT = 32400  # Decide the build is dead or will never finish after this time.
 
 
 class BuildbotTimeout(Exception):
@@ -38,168 +27,105 @@
   pass
 
 
-def ParseReportLog(url, build):
-  """Scrape the trybot image name off the Reports log page.
-
-  This takes the URL for a trybot Reports Stage web page,
-  and a trybot build type, such as 'daisy-release'.  It
-  opens the web page and parses it looking for the trybot
-  artifact name (e.g. something like
-  'trybot-daisy-release/R40-6394.0.0-b1389'). It returns the
-  artifact name, if found.
-  """
-  trybot_image = ''
-  url += '/text'
-  newurl = url.replace('uberchromegw', 'chromegw')
-  webpage = urllib2.urlopen(newurl)
-  data = webpage.read()
-  lines = data.split('\n')
-  for l in lines:
-    if l.find('Artifacts') > 0 and l.find('trybot') > 0:
-      trybot_name = 'trybot-%s' % build
-      start_pos = l.find(trybot_name)
-      end_pos = l.find('@https://storage')
-      trybot_image = l[start_pos:end_pos]
-
-  return trybot_image
-
-
-def GetBuildData(buildbot_queue, build_id):
-  """Find the Reports stage web page for a trybot build.
-
-  This takes the name of a buildbot_queue, such as 'daisy-release'
-  and a build id (the build number), and uses the json buildbot api to
-  find the Reports stage web page for that build, if it exists.
-  """
-  builder = buildbot_json.Buildbot(
-      'http://chromegw/p/tryserver.chromiumos/').builders[buildbot_queue]
-  build_data = builder.builds[build_id].data
-  logs = build_data['logs']
-  for l in logs:
-    fname = l[1]
-    if 'steps/Report/' in fname:
-      return fname
-
-  return ''
-
-
-def FindBuildRecordFromLog(description, build_info):
-  """Find the right build record in the build logs.
-
-  Get the first build record from build log with a reason field
-  that matches 'description'. ('description' is a special tag we
-  created when we launched the buildbot, so we could find it at this
-  point.)
-  """
-  for build_log in build_info:
-    property_list = build_log['properties']
-    for prop in property_list:
-      if len(prop) < 2:
-        continue
-      pname = prop[0]
-      pvalue = prop[1]
-      if pname == 'name' and pvalue == description:
-        return build_log
-  return {}
-
-
-def GetBuildInfo(file_dir, waterfall_builder):
-  """Get all the build records for the trybot builds."""
-
-  builder = ''
-  if waterfall_builder.endswith('-release'):
-    builder = 'release'
-  elif waterfall_builder.endswith('-gcc-toolchain'):
-    builder = 'gcc_toolchain'
-  elif waterfall_builder.endswith('-llvm-toolchain'):
-    builder = 'llvm_toolchain'
-  elif waterfall_builder.endswith('-llvm-next-toolchain'):
-    builder = 'llvm_next_toolchain'
-
-  sa_file = os.path.expanduser(
-      os.path.join(file_dir, 'cros_utils',
-                   'chromeos-toolchain-credentials.json'))
-  scopes = ['https://www.googleapis.com/auth/userinfo.email']
-
-  credentials = ServiceAccountCredentials.from_json_keyfile_name(
-      sa_file, scopes=scopes)
-  url = (
-      'https://luci-milo.appspot.com/prpc/milo.Buildbot/GetBuildbotBuildsJSON')
-
-  # NOTE: If we want to get build logs for the main waterfall builders, the
-  # 'master' field below should be 'chromeos' instead of 'chromiumos.tryserver'.
-  # Builder would be 'amd64-gcc-toolchain' or 'arm-llvm-toolchain', etc.
-
-  body = json.dumps({
-      'master': 'chromiumos.tryserver',
-      'builder': builder,
-      'include_current': True,
-      'limit': 100
-  })
-  access_token = credentials.get_access_token()
-  headers = {
-      'Accept': 'application/json',
-      'Content-Type': 'application/json',
-      'Authorization': 'Bearer %s' % access_token.access_token
-  }
-  r = urllib2.Request(url, body, headers)
-  u = urllib2.urlopen(r, timeout=60)
-  u.read(4)
-  o = json.load(u)
-  data = [base64.b64decode(item['data']) for item in o['builds']]
-  result = []
-  for d in data:
-    tmp = json.loads(d)
-    result.append(tmp)
-  return result
-
-
-def FindArchiveImage(chromeos_root, build, build_id):
-  """Returns name of the trybot artifact for board/build_id."""
+def RunCommandInPath(path, cmd):
   ce = command_executer.GetCommandExecuter()
-  command = ('gsutil ls gs://chromeos-image-archive/trybot-%s/*b%s'
-             '/chromiumos_test_image.tar.xz' % (build, build_id))
-  _, out, _ = ce.ChrootRunCommandWOutput(
-      chromeos_root, command, print_to_console=False)
-  #
-  # If build_id is not unique, there may be multiple archive images
-  # to choose from; sort them & pick the first (newest).
-  #
-  # If there are multiple archive images found, out will look something
-  # like this:
-  #
-  # 'gs://.../R35-5692.0.0-b105/chromiumos_test_image.tar.xz
-  #  gs://.../R46-7339.0.0-b105/chromiumos_test_image.tar.xz'
-  #
-  out = out.rstrip('\n')
-  tmp_list = out.split('\n')
-  # After stripping the final '\n' and splitting on any other '\n', we get
-  # something like this:
-  #  tmp_list = [ 'gs://.../R35-5692.0.0-b105/chromiumos_test_image.tar.xz' ,
-  #               'gs://.../R46-7339.0.0-b105/chromiumos_test_image.tar.xz' ]
-  #
-  #  If we sort this in descending order, we should end up with the most
-  #  recent test image first, so that's what we do here.
-  #
-  if len(tmp_list) > 1:
-    tmp_list = sorted(tmp_list, reverse=True)
-  out = tmp_list[0]
+  cwd = os.getcwd()
+  os.chdir(path)
+  status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False)
+  os.chdir(cwd)
+  return status, stdout, stderr
 
-  trybot_image = ''
-  trybot_name = 'trybot-%s' % build
-  if out and out.find(trybot_name) > 0:
-    start_pos = out.find(trybot_name)
-    end_pos = out.find('/chromiumos_test_image')
-    trybot_image = out[start_pos:end_pos]
 
-  return trybot_image
+def PeekTrybotImage(chromeos_root, buildbucket_id):
+  """Get the artifact URL of a given tryjob.
+
+  Args:
+    buildbucket_id: buildbucket-id
+    chromeos_root: root dir of chrome os checkout
+
+  Returns:
+    (status, url) where status can be 'pass', 'fail', 'running',
+                  and url looks like:
+    gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
+  """
+  command = (
+      'cros buildresult --report json --buildbucket-id %s' % buildbucket_id)
+  rc, out, _ = RunCommandInPath(chromeos_root, command)
+
+  # Current implementation of cros buildresult returns fail when a job is still
+  # running.
+  if rc != 0:
+    return ('running', None)
+
+  results = json.loads(out)[buildbucket_id]
+
+  return (results['status'], results['artifacts_url'].rstrip('/'))
+
+
+def ParseTryjobBuildbucketId(msg):
+  """Find the buildbucket-id in the messages from `cros tryjob`.
+
+  Args:
+    msg: messages from `cros tryjob`
+
+  Returns:
+    buildbucket-id, which will be passed to `cros buildresult`
+  """
+  output_list = ast.literal_eval(msg)
+  output_dict = output_list[0]
+  if 'buildbucket_id' in output_dict:
+    return output_dict['buildbucket_id']
+  return None
+
+
+def SubmitTryjob(chromeos_root,
+                 buildbot_name,
+                 patch_list,
+                 tryjob_flags=None,
+                 build_toolchain=False):
+  """Calls `cros tryjob ...`
+
+  Args:
+    chromeos_root: the path to the ChromeOS root, needed for finding chromite
+                   and launching the buildbot.
+    buildbot_name: the name of the buildbot queue, such as lumpy-release or
+                   daisy-paladin.
+    patch_list: a python list of the patches, if any, for the buildbot to use.
+    tryjob_flags: See cros tryjob --help for available options.
+    build_toolchain: builds and uses the latest toolchain, rather than the
+                     prebuilt one in SDK.
+
+  Returns:
+    buildbucket id
+  """
+  patch_arg = ''
+  if patch_list:
+    for p in patch_list:
+      patch_arg = patch_arg + ' -g ' + repr(p)
+  if not tryjob_flags:
+    tryjob_flags = []
+  if build_toolchain:
+    tryjob_flags.append('--latest-toolchain')
+  tryjob_flags = ' '.join(tryjob_flags)
+
+  # Launch buildbot with appropriate flags.
+  build = buildbot_name
+  command = ('cros tryjob --yes --json --nochromesdk  %s %s %s' %
+             (tryjob_flags, patch_arg, build))
+  print('CMD: %s' % command)
+  _, out, _ = RunCommandInPath(chromeos_root, command)
+  buildbucket_id = ParseTryjobBuildbucketId(out)
+  print('buildbucket_id: %s' % repr(buildbucket_id))
+  if not buildbucket_id:
+    logger.GetLogger().LogFatal('Error occurred while launching trybot job: '
+                                '%s' % command)
+  return buildbucket_id
 
 
 def GetTrybotImage(chromeos_root,
                    buildbot_name,
                    patch_list,
-                   build_tag,
-                   other_flags=None,
+                   tryjob_flags=None,
                    build_toolchain=False,
                    async=False):
   """Launch buildbot and get resulting trybot artifact name.
@@ -210,158 +136,71 @@
   has finished, it parses the resulting report logs to find the trybot
   artifact (if one was created), and returns that artifact name.
 
-  chromeos_root is the path to the ChromeOS root, needed for finding chromite
-  and launching the buildbot.
+  Args:
+    chromeos_root: the path to the ChromeOS root, needed for finding chromite
+                   and launching the buildbot.
+    buildbot_name: the name of the buildbot queue, such as lumpy-release or
+                   daisy-paladin.
+    patch_list: a python list of the patches, if any, for the buildbot to use.
+    tryjob_flags: See cros tryjob --help for available options.
+    build_toolchain: builds and uses the latest toolchain, rather than the
+                     prebuilt one in SDK.
+    async: don't wait for artifacts; just return the buildbucket id
 
-  buildbot_name is the name of the buildbot queue, such as lumpy-release or
-  daisy-paladin.
-
-  patch_list a python list of the patches, if any, for the buildbot to use.
-
-  build_tag is a (unique) string to be used to look up the buildbot results
-  from among all the build records.
+  Returns:
+    (buildbucket id, partial image url) e.g.
+    (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596)
   """
-  ce = command_executer.GetCommandExecuter()
-  cbuildbot_path = os.path.join(chromeos_root, 'chromite/cbuildbot')
-  base_dir = os.getcwd()
-  patch_arg = ''
-  if patch_list:
-    for p in patch_list:
-      patch_arg = patch_arg + ' -g ' + repr(p)
-  toolchain_flags = ''
-  if build_toolchain:
-    toolchain_flags += '--latest-toolchain'
-  os.chdir(cbuildbot_path)
-  if other_flags:
-    optional_flags = ' '.join(other_flags)
-  else:
-    optional_flags = ''
+  buildbucket_id = SubmitTryjob(chromeos_root, buildbot_name, patch_list,
+                                tryjob_flags, build_toolchain)
+  if async:
+    return buildbucket_id, ' '
 
-  # Launch buildbot with appropriate flags.
-  build = buildbot_name
-  description = build_tag
-  command_prefix = ''
-  if not patch_arg:
-    command_prefix = 'yes | '
-  command = ('%s ./cbuildbot --remote --nochromesdk %s'
-             ' --remote-description=%s %s %s %s' %
-             (command_prefix, optional_flags, description, toolchain_flags,
-              patch_arg, build))
-  _, out, _ = ce.RunCommandWOutput(command)
-  if 'Tryjob submitted!' not in out:
-    logger.GetLogger().LogFatal('Error occurred while launching trybot job: '
-                                '%s' % command)
-
-  os.chdir(base_dir)
-
-  build_id = 0
-  build_status = None
-  # Wait for  buildbot to finish running (check every 10 minutes).  Wait
-  # 10 minutes before the first check to give the buildbot time to launch
-  # (so we don't start looking for build data before it's out there).
-  time.sleep(SLEEP_TIME)
-  done = False
-  pending = True
-  # pending_time is the time between when we submit the job and when the
-  # buildbot actually launches the build.  running_time is the time between
-  # when the buildbot job launches and when it finishes.  The job is
-  # considered 'pending' until we can find an entry for it in the buildbot
-  # logs.
-  pending_time = SLEEP_TIME
-  running_time = 0
-  long_slept = False
-  while not done:
-    done = True
-    build_info = GetBuildInfo(base_dir, build)
-    if not build_info:
-      if pending_time > TIME_OUT:
+  # The trybot generally takes more than 2 hours to finish.
+  # Wait two hours before polling the status.
+  time.sleep(INITIAL_SLEEP_TIME)
+  elapsed = INITIAL_SLEEP_TIME
+  status = 'running'
+  image = ''
+  while True:
+    status, image = PeekTrybotImage(chromeos_root, buildbucket_id)
+    if status == 'running':
+      if elapsed > TIME_OUT:
         logger.GetLogger().LogFatal(
-            'Unable to get build logs for target %s.' % build)
+            'Unable to get build result for target %s.' % buildbot_name)
       else:
-        pending_message = 'Unable to find build log; job may be pending.'
-        done = False
-
-    if done:
-      data_dict = FindBuildRecordFromLog(description, build_info)
-      if not data_dict:
-        # Trybot job may be pending (not actually launched yet).
-        if pending_time > TIME_OUT:
-          logger.GetLogger().LogFatal('Unable to find build record for trybot'
-                                      ' %s.' % description)
-        else:
-          pending_message = 'Unable to find build record; job may be pending.'
-          done = False
-
-      else:
-        # Now that we have actually found the entry for the build
-        # job in the build log, we know the job is actually
-        # runnning, not pending, so we flip the 'pending' flag.  We
-        # still have to wait for the buildbot job to finish running
-        # however.
-        pending = False
-        build_id = data_dict['number']
-
-        if async:
-          # Do not wait for trybot job to finish; return immediately
-          return build_id, ' '
-
-        if not long_slept:
-          # The trybot generally takes more than 2 hours to finish.
-          # Wait two hours before polling the status.
-          long_slept = True
-          time.sleep(INITIAL_SLEEP_TIME)
-          pending_time += INITIAL_SLEEP_TIME
-        if True == data_dict['finished']:
-          build_status = data_dict['results']
-        else:
-          done = False
-
-    if not done:
-      if pending:
-        logger.GetLogger().LogOutput(pending_message)
-        logger.GetLogger().LogOutput('Current pending time: %d minutes.' %
-                                     (pending_time / 60))
-        pending_time += SLEEP_TIME
-      else:
-        logger.GetLogger().LogOutput(
-            '{0} minutes passed.'.format(running_time / 60))
-        logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
-        running_time += SLEEP_TIME
-
+        wait_msg = 'Unable to find build result; job may be running.'
+        logger.GetLogger().LogOutput(wait_msg)
+      logger.GetLogger().LogOutput('{0} minutes elapsed.'.format(elapsed / 60))
+      logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
       time.sleep(SLEEP_TIME)
-      if running_time > TIME_OUT:
-        done = True
+      elapsed += SLEEP_TIME
+    else:
+      break
 
-  trybot_image = ''
-
-  if build.endswith('-toolchain'):
-    # For rotating testers, we don't care about their build_status
+  if not buildbot_name.endswith('-toolchain') and status == 'fail':
+    # For rotating testers, we don't care about their status
     # result, because if any HWTest failed it will be non-zero.
-    trybot_image = FindArchiveImage(chromeos_root, build, build_id)
-  else:
+    #
     # The nightly performance tests do not run HWTests, so if
-    # their build_status is non-zero, we do care.  In this case
+    # their status is non-zero, we do care.  In this case
     # non-zero means the image itself probably did not build.
-    if build_status in OK_STATUS:
-      trybot_image = FindArchiveImage(chromeos_root, build, build_id)
-  if not trybot_image:
-    logger.GetLogger().LogError('Trybot job %s failed with status %d;'
-                                ' no trybot image generated.' % (description,
-                                                                 build_status))
+    image = ''
 
-  logger.GetLogger().LogOutput("trybot_image is '%s'" % trybot_image)
-  logger.GetLogger().LogOutput('build_status is %d' % build_status)
-  return build_id, trybot_image
+  if not image:
+    logger.GetLogger().LogError(
+        'Trybot job (buildbucket id: %s) failed with '
+        'status %s; no trybot image generated.' % (buildbucket_id, status))
+  else:
+    # Convert full gs path to what crosperf expects. For example, convert
+    # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
+    # to
+    # trybot-elm-release-tryjob/R67-10468.0.0-b20789
+    image = '/'.join(image.split('/')[-2:])
 
-
-def GetGSContent(chromeos_root, path):
-  """gsutil cat path"""
-
-  ce = command_executer.GetCommandExecuter()
-  command = ('gsutil cat gs://chromeos-image-archive/%s' % path)
-  _, out, _ = ce.ChrootRunCommandWOutput(
-      chromeos_root, command, print_to_console=False)
-  return out
+  logger.GetLogger().LogOutput("image is '%s'" % image)
+  logger.GetLogger().LogOutput('status is %s' % status)
+  return buildbucket_id, image
 
 
 def DoesImageExist(chromeos_root, build):
@@ -386,8 +225,8 @@
     time.sleep(SLEEP_TIME)
     elapsed_time += SLEEP_TIME
 
-  logger.GetLogger().LogOutput('Image %s not found, waited for %d hours' %
-                               (build, (TIME_OUT / 3600)))
+  logger.GetLogger().LogOutput(
+      'Image %s not found, waited for %d hours' % (build, (TIME_OUT / 3600)))
   raise BuildbotTimeout('Timeout while waiting for image %s' % build)
 
 
@@ -405,6 +244,6 @@
   candidates = [[int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m]
   candidates.sort(reverse=True)
   for c in candidates:
-      build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
-      if DoesImageExist(chromeos_root, build):
-          return build
+    build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
+    if DoesImageExist(chromeos_root, build):
+      return build
diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py
new file mode 100755
index 0000000..c57b2d3
--- /dev/null
+++ b/cros_utils/buildbot_utils_unittest.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python2
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Unittest for buildbot_utils.py."""
+
+from __future__ import print_function
+
+from mock import patch
+
+import time
+import unittest
+
+from cros_utils import buildbot_utils
+from cros_utils import command_executer
+
+
+class TrybotTest(unittest.TestCase):
+  """Test for CommandExecuter class."""
+
+  old_tryjob_out = (
+      'Verifying patches...\n'
+      'Submitting tryjob...\n'
+      'Successfully sent PUT request to [buildbucket_bucket:master.chromiumos.t'
+      'ryserver] with [config:success-build] [buildbucket_id:895272114382368817'
+      '6].\n'
+      'Tryjob submitted!\n'
+      'To view your tryjobs, visit:\n'
+      '  http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbuck'
+      'etId=8952721143823688176\n'
+      '  https://uberchromegw.corp.google.com/i/chromiumos.tryserver/waterfall?'
+      'committer=laszio@chromium.org&builder=etc\n')
+  tryjob_out = (
+      '[{"buildbucket_id": "8952721143823688176", "build_config": '
+      '"cave-llvm-toolchain-tryjob", "url": '
+      '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]'
+  )
+
+  buildresult_out = (
+      '{"8952721143823688176": {"status": "pass", "artifacts_url":'
+      '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-'
+      'b20789"}}')
+
+  buildbucket_id = '8952721143823688176'
+  counter_1 = 10
+
+  def testGetTrybotImage(self):
+    with patch.object(buildbot_utils, 'SubmitTryjob') as mock_submit:
+      with patch.object(buildbot_utils, 'PeekTrybotImage') as mock_peek:
+        with patch.object(time, 'sleep', return_value=None):
+
+          def peek(_chromeos_root, _buildbucket_id):
+            self.counter_1 -= 1
+            if self.counter_1 >= 0:
+              return ('running', '')
+            return ('pass',
+                    'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
+                    'R67-10468.0.0-b20789')
+
+          mock_peek.side_effect = peek
+          mock_submit.return_value = self.buildbucket_id
+
+          # sync
+          buildbucket_id, image = buildbot_utils.GetTrybotImage(
+              '/tmp', 'falco-release-tryjob', [])
+          self.assertEqual(buildbucket_id, self.buildbucket_id)
+          self.assertEqual('trybot-elm-release-tryjob/'
+                           'R67-10468.0.0-b20789', image)
+
+          # async
+          buildbucket_id, image = buildbot_utils.GetTrybotImage(
+              '/tmp', 'falco-release-tryjob', [], async=True)
+          self.assertEqual(buildbucket_id, self.buildbucket_id)
+          self.assertEqual(' ', image)
+
+  def testSubmitTryjob(self):
+    with patch.object(command_executer.CommandExecuter,
+                      'RunCommandWOutput') as mocked_run:
+      mocked_run.return_value = (0, self.tryjob_out, '')
+      buildbucket_id = buildbot_utils.SubmitTryjob('/', 'falco-release-tryjob',
+                                                   [], [])
+      self.assertEqual(buildbucket_id, self.buildbucket_id)
+
+  def testPeekTrybotImage(self):
+    with patch.object(command_executer.CommandExecuter,
+                      'RunCommandWOutput') as mocked_run:
+      # pass
+      mocked_run.return_value = (0, self.buildresult_out, '')
+      status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
+      self.assertEqual('pass', status)
+      self.assertEqual(
+          'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
+          'R67-10468.0.0-b20789', image)
+
+      # running
+      mocked_run.return_value = (1, '', '')
+      status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
+      self.assertEqual('running', status)
+      self.assertEqual(None, image)
+
+      # fail
+      buildresult_fail = self.buildresult_out.replace('\"pass\"', '\"fail\"')
+      mocked_run.return_value = (0, buildresult_fail, '')
+      status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
+      self.assertEqual('fail', status)
+      self.assertEqual(
+          'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
+          'R67-10468.0.0-b20789', image)
+
+  def testParseTryjobBuildbucketId(self):
+    buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(self.tryjob_out)
+    self.assertEqual(buildbucket_id, self.buildbucket_id)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/cros_utils/chromeos-toolchain-credentials.json b/cros_utils/chromeos-toolchain-credentials.json
deleted file mode 100644
index aed9a52..0000000
--- a/cros_utils/chromeos-toolchain-credentials.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "type": "service_account",
-  "project_id": "chromeos-toolchain-u",
-  "private_key_id": "d0efe593ad39aad4c685273ee80e4c24bb3f2e92",
-  "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC5Rm6aqSjMNrRp\ntYNc++ec79L3QZ2MxLMYKyhlgGhppVt6p/wiSvLdI19nS5TAkKMjKv71rb9DbQlG\nfQVckiY+MlADJKi29lJrwqqNDCcRgB7CL6hgDfmhWMKonZn2MwvBVROD0gi7sY+A\nipIe92jVeqG8Gvp5kOgsBxCRV5YQok8j1FxE5fIsS2sg93VS1YAzH8uPYadWb/Z5\n9uwc8U7SL0mEPjXjsLEm8Y70zovGVjv7kOLqYBMUmROLvSouG/HrZWy9uTgxFOb5\njOhxKhDcDMPVM3g8lfc0EwPUB1NxXztoST9qBJVqdzQmHpPjeDxgru0A+zaQMkWA\ne8Sn5EQrAgMBAAECggEAUnhbe1SY6G3hWLyHQLiQnHbUPWNjselMnxF25deGoqAM\nXEiyHsGl4XGBYgVurVi0hU76NnmkjWrXmhzEajRT+ZODsiJ7RxXWEkmQiUBDk7Kn\n/mAgXsFZwMw1ucCNa93E+cXY7fBsGsAq1FjaOhZ+/6eanpSTsdEix5ZNdaS7E6Op\n9zIba9EjLIvSl435+eWq0C3aU9nd1RbbRwD6vGpgG8L/r957s+AAALTqdSZGWxJX\nEC9OKT07e76qvwAsq2BoBx5vW0xmeQdZgKrA10LLDWa7UjFbwSDJIBESYtd4rYMj\nAqg5eND0bC1RrgzI+RD/10l6Vj8bBFo/403s0P5LYQKBgQDiVGVFkrw5LSy82CGC\nvSraxPriivEweMfpkp6buMbD2Je0RMR4glc1vW5m0QUJmy+ymiIHVMCmE9xNBwbS\nRyCBnrs2+3FtdnruNdcaGh6sbTlY+qJI0rEZUdbb5OhlHZF47KW66hI6sWJ1YF8O\niLQTokW8ejybprCtl1HvEHhEbwKBgQDRkD/acZrvmcnqqmorqW6mgJEkrRF/i5Th\npDo3WegXA4irX0tNqh5w+wms8r41vUZSCZYvyi0Of9LMObVdB/gA/qVzETE0p5he\ns3Skp/VK8nF53pAUd+4dKlnCvD3TOEkIq+kxuEOs2iHJcvSjmKtMgqfMK/UtieB4\n7+MaOcbyBQKBgHOUndMVyEF6rGoUBaj6abQm++hNBDa4t360fYMQrZC+P1qz85GH\nHno3LvYar/Pj6EvRIqeTxH4LjmlXuUgRQqxvHzRI2/gGlWio3hxaUmfDr5GdDNsb\nnY1MmejZ0UQyAWQ7lbcKahzHEXzXpjOJ5ExShkJmOiVSzs8Xg6QOSRzJAoGAemYs\nRWQxQFysqJlcZaASdsGFBMzo+XwHOzt2nTmv6zEvNBj2nKgOG6MkZZVqL20bk3Lx\n+3u0kVFrR8k0+t9apQoWjHywJrb0setS55EKHfo4+RtbP/lEZFiGEM1ttt6bGat/\nCoE7VcwaC9VOufbDpm5xnzjVfQGY0EocdQbmAhkCgYB/isdqeDyafawr+38fcU1r\nX2+cK5JCrEzHIwg2QN3Z56cRrqrb+kK1H3w/F7ZfrlPSmS8XMwZV73QwieoGNIYL\nie9UZqRoZSG73FzIw5mXhWWA1adFz8HpGG5wRNshnPI2liOPwhnblfadJUfXb2br\n021vPgpsxamLjHSDSmSf6Q==\n-----END PRIVATE KEY-----\n",
-  "client_email": "mobiletc-prebuild-2@chromeos-toolchain-u.iam.gserviceaccount.com",
-  "client_id": "114495086044042319417",
-  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-  "token_uri": "https://accounts.google.com/o/oauth2/token",
-  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
-  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/mobiletc-prebuild-2%40chromeos-toolchain-u.iam.gserviceaccount.com"
-}
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index 939ed66..f9034b8 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -18,7 +18,8 @@
 import logger
 
 CHROMEOS_SCRIPTS_DIR = '~/trunk/src/scripts'
-TOOLCHAIN_UTILS_PATH = '~/trunk/src/platform/dev/toolchain_utils.sh'
+TOOLCHAIN_UTILS_PATH = ('/mnt/host/source/src/third_party/toolchain-utils/'
+                        'cros_utils/toolchain_utils.sh')
 
 
 def GetChromeOSVersionFromLSBVersion(lsb_version):
diff --git a/cros_utils/toolchain_utils.sh b/cros_utils/toolchain_utils.sh
new file mode 100644
index 0000000..5e9a2a3
--- /dev/null
+++ b/cros_utils/toolchain_utils.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO: Convert this to python.
+
+get_all_board_toolchains()
+{
+  cros_setup_toolchains --show-board-cfg="$1" | sed 's:,: :g'
+}
+
+get_ctarget_from_board()
+{
+  local all_toolchains=( $(get_all_board_toolchains "$@") )
+  echo "${all_toolchains[0]}"
+}
+
+get_board_arch()
+{
+  local ctarget=$(get_ctarget_from_board "$@")
+
+  # Ask crossdev what the magical portage arch is!
+  local arch=$(eval $(crossdev --show-target-cfg "${ctarget}"); echo ${arch})
+  if [[ -z ${arch} ]] ; then
+    error "Unable to determine ARCH from toolchain: ${ctarget}"
+    return 1
+  fi
+
+  echo "${arch}"
+  return 0
+}
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index bbb1cdf..60ac778 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -12,9 +12,9 @@
     'octane': 0.015,
     'kraken': 0.019,
     'speedometer': 0.007,
+    'speedometer2': 0.006,
     'dromaeo.domcoreattr': 0.023,
     'dromaeo.domcoremodify': 0.011,
-    'smoothness.tough_webgl_cases': 0.025,
     'graphics_WebGLAquarium': 0.008,
     'page_cycler_v2.typical_25': 0.021,
 }
diff --git a/crosperf/default-telemetry-results.json b/crosperf/default-telemetry-results.json
index 7099ac7..240664b 100644
--- a/crosperf/default-telemetry-results.json
+++ b/crosperf/default-telemetry-results.json
@@ -1,174 +1,155 @@
 {
   "peacekeeper.html": [
-    "Total__Score", 
-    "workerContrast01__Score", 
+    "Total__Score",
+    "workerContrast01__Score",
     "workerContrast02__Score"
-  ], 
+  ],
   "page_cycler_v2.intl_hi_ru": [
-    "cold_times__page_load_time", 
+    "cold_times__page_load_time",
     "warm_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "smoothness.tough_webgl_cases": [
-    "percentage_smooth__percentage_smooth", 
+    "percentage_smooth__percentage_smooth",
     "percentage_smooth__summary"
-  ], 
+  ],
   "page_cycler_v2.intl_es_fr_pt-BR": [
-    "cold_times__page_load_time", 
+    "cold_times__page_load_time",
     "warm_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
-  "dromaeo.jslibeventjquery": [
-    "jslib_event_jquery__jslib_event_jquery"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "browsermark": [
     "Score__Score"
-  ], 
+  ],
   "smoothness.top_25": [
-    "frame_times__frame_times", 
+    "frame_times__frame_times",
     "mean_frame_time__mean_frame_time"
-  ], 
+  ],
   "page_cycler_v2.morejs": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "page_cycler_v2.dhtml": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "page_cycler_v2.bloat": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
-  "dromaeo.jslibstyleprototype": [
-    "jslib_style_prototype__jslib_style_prototype"
-  ], 
-  "dromaeo.jslibstylejquery": [
-    "jslib_style_jquery__jslib_style_jquery"
-  ], 
-  "dromaeo.jslibeventprototype": [
-    "jslib_event_prototype__jslib_event_prototype"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "page_cycler_v2.moz": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "speedometer": [
-    "Total__Total", 
+    "Total__Total",
     "Total__summary"
-  ], 
+  ],
+  "speedometer2": [
+    "Total__summary"
+  ],
   "octane": [
     "Total__Score"
-  ], 
+  ],
   "jsgamebench": [
     "Score__Score"
-  ], 
+  ],
   "page_cycler_v2.indexed_db.basic_insert": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "spaceport": [
     "Score__Score"
-  ], 
-  "dromaeo.jslibtraverseprototype": [
-    "jslib_traverse_prototype__jslib_traverse_prototype"
-  ], 
+  ],
   "page_cycler_v2.netsim.top_10": [
-    "cold_times__page_load_time", 
+    "cold_times__page_load_time",
     "warm_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
-  "robohornet_pro": [
-    "Total__Total",
-    "Total__summary"
-  ], 
-  "dromaeo.domcoreattr": [
-    "dom_attr__dom_attr", 
-    "dom__summary"
-  ], 
-  "dromaeo.jslibattrprototype": [
-    "jslib_attr_prototype__jslib_attr_prototype"
-  ], 
-  "sunspider": [
-    "Total__Total",
-    "Total__summary"
-  ], 
-  "dromaeo.jslibattrjquery": [
-    "jslib_attr_jquery__jslib_attr_jquery"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "page_cycler_v2.typical_25": [
-    "warm_times-page_load_time__warm_times-page_load_time", 
+    "warm_times-page_load_time__warm_times-page_load_time",
     "cold_times-page_load_time__cold_times-page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
-  "dromaeo.domcoretraverse": [
-    "dom_traverse__dom_traverse",
-    "dom__summary"
-  ], 
-  "dromaeo.domcoremodify": [
-    "dom_modify__dom_modify", 
-    "dom__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "page_cycler_v2.intl_ar_fa_he": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "page_cycler_v2.intl_ja_zh": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "graphics_WebGLAquarium": [
     "avg_fps_1000_fishes",
     "avg_fps_1000_fishes__summary"
-  ], 
+  ],
   "page_cycler_v2.intl_ko_th_vi": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "canvasmark": [
     "Score__Score"
-  ], 
-  "dromaeo.domcorequery": [
-    "dom_query__dom_query",
-    "dom__summary"
-  ], 
-  "dromaeo.jslibtraversejquery": [
-    "jslib_traverse_jquery__jslib_traverse_jquery"
-  ], 
-  "dromaeo.jslibmodifyprototype": [
-    "jslib_modify_prototype__jslib_modify_prototype"
-  ], 
+  ],
   "page_cycler_v2.tough_layout_cases": [
-    "warm_times__page_load_time", 
+    "warm_times__page_load_time",
     "cold_times__page_load_time",
     "pcv1-warm@@timeToOnload_avg__summary",
-    "pcv1-cold@@timeToOnload_avg__summary"
-  ], 
+    "pcv1-cold@@timeToOnload_avg__summary",
+    "cold@@timeToOnload_avg__summary",
+    "warm@@timeToOnload_avg__summary"
+  ],
   "kraken": [
-    "Total__Total", 
+    "Total__Total",
     "Total__summary"
-  ], 
-  "dromaeo.jslibmodifyjquery": [
-    "jslib_modify_jquery__jslib_modify_jquery"
+  ],
+  "jetstream": [
+    "Score__summary"
+  ],
+  "cros_ui_smoothness": [
+    "ui_percentage_smooth__summary"
   ]
 }
diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index 619068f..c3a8cc7 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -1,8 +1,13 @@
-x86-alex  : chromeos2-row9-rack10-host1.cros  chromeos2-row9-rack10-host3.cros  chromeos2-row9-rack10-host5.cros
-lumpy     : chromeos2-row9-rack9-host9.cros   chromeos2-row9-rack9-host11.cros  chromeos2-row9-rack9-host13.cros
-parrot    : chromeos2-row9-rack9-host15.cros  chromeos2-row9-rack9-host17.cros  chromeos2-row9-rack9-host19.cros
-daisy     : chromeos2-row9-rack9-host3.cros   chromeos2-row9-rack9-host5.cros   chromeos2-row9-rack9-host7.cros
-peach_pit : chromeos2-row9-rack10-host13.cros chromeos2-row9-rack10-host15.cros chromeos2-row9-rack10-host17.cros
-peppy     : chromeos2-row9-rack10-host19.cros chromeos2-row9-rack10-host21.cros chromeos2-row9-rack9-host1.cros
-squawks   : chromeos2-row9-rack10-host7.cros  chromeos2-row9-rack10-host9.cros  chromeos2-row9-rack10-host11.cros
-elm       : chromeos2-row9-rack8-host19.cros  chromeos2-row9-rack8-host21.cros
+daisy         : chromeos2-row9-rack9-host7.cros
+peach_pit     : chromeos2-row9-rack10-host13.cros chromeos2-row9-rack10-host15.cros chromeos2-row9-rack10-host17.cros
+peppy         : chromeos2-row9-rack10-host19.cros chromeos2-row9-rack10-host21.cros chromeos2-row9-rack9-host1.cros
+squawks       : chromeos2-row9-rack10-host7.cros  chromeos2-row9-rack10-host9.cros  chromeos2-row9-rack10-host11.cros
+elm           : chromeos2-row9-rack8-host19.cros  chromeos2-row9-rack8-host21.cros
+bob           : chromeos2-row9-rack7-host1.cros   chromeos2-row9-rack7-host3.cros
+chell         : chromeos2-row9-rack8-host3.cros   chromeos2-row9-rack8-host5.cros
+falco         : chromeos2-row9-rack8-host13.cros  chromeos2-row9-rack8-host11.cros
+kefka         : chromeos2-row9-rack9-host21.cros  chromeos2-row9-rack8-host1.cros
+lulu          : chromeos2-row9-rack8-host9.cros   chromeos2-row9-rack8-host7.cros
+nautilus      : chromeos2-row9-rack7-host11.cros  chromeos2-row9-rack7-host9.cros
+snappy        : chromeos2-row9-rack7-host5.cros   chromeos2-row9-rack7-host7.cros
+veyron_minnie : chromeos2-row9-rack8-host15.cros  chromeos2-row9-rack8-host17.cros
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 9d58048..bd25c78 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -22,8 +22,8 @@
 # to run together.
 
 telemetry_perfv2_tests = [
-    'dromaeo.domcoreattr', 'dromaeo.domcoremodify', 'dromaeo.domcorequery',
-    'dromaeo.domcoretraverse', 'kraken', 'octane', 'robohornet_pro', 'sunspider'
+    'kraken',
+    'octane',
 ]
 
 telemetry_pagecycler_tests = [
@@ -32,27 +32,24 @@
     'page_cycler_v2.intl_hi_ru',
     'page_cycler_v2.intl_ja_zh',
     'page_cycler_v2.intl_ko_th_vi',
-    #                              'page_cycler_v2.morejs',
-    #                              'page_cycler_v2.moz',
-    #                              'page_cycler_v2.netsim.top_10',
-    'page_cycler_v2.tough_layout_cases',
-    'page_cycler_v2.typical_25'
+    'page_cycler_v2.typical_25',
 ]
 
 telemetry_toolchain_old_perf_tests = [
-    'dromaeo.domcoremodify', 'page_cycler_v2.intl_es_fr_pt-BR',
-    'page_cycler_v2.intl_hi_ru', 'page_cycler_v2.intl_ja_zh',
-    'page_cycler_v2.intl_ko_th_vi', 'page_cycler_v2.netsim.top_10',
-    'page_cycler_v2.typical_25', 'robohornet_pro', 'spaceport',
-    'tab_switching.top_10'
+    'page_cycler_v2.intl_es_fr_pt-BR',
+    'page_cycler_v2.intl_hi_ru',
+    'page_cycler_v2.intl_ja_zh',
+    'page_cycler_v2.intl_ko_th_vi',
+    'page_cycler_v2.netsim.top_10',
+    'page_cycler_v2.typical_25',
+    'spaceport',
+    'tab_switching.top_10',
 ]
 telemetry_toolchain_perf_tests = [
     'octane',
     'kraken',
     'speedometer',
-    'dromaeo.domcoreattr',
-    'dromaeo.domcoremodify',
-    'smoothness.tough_webgl_cases',
+    'speedometer2',
 ]
 graphics_perf_tests = [
     'graphics_GLBench',
@@ -65,9 +62,9 @@
     'octane',
     'kraken',
     'speedometer',
+    'speedometer2',
     'jetstream',
-    'startup.cold.blank_page',
-    'smoothness.top_25_smooth',
+    'cros_ui_smoothness',
 ]
 crosbolt_perf_tests = [
     'graphics_WebGLAquarium',
@@ -76,7 +73,6 @@
     'video_WebRtcPerf',
     'BootPerfServerCrosPerf',
     'power_Resume',
-    'video_PlaybackPerf.h264',
     'build_RootFilesystemSize',
 ]
 
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 44090e5..b7d3420 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -219,21 +219,20 @@
 
   def test_get_default_remotes(self):
     board_list = [
-        'x86-alex', 'lumpy', 'elm', 'parrot', 'daisy', 'peach_pit', 'peppy',
-        'squawks'
+        'lumpy', 'elm', 'parrot', 'daisy', 'peach_pit', 'peppy', 'squawks'
     ]
 
     ef = ExperimentFactory()
     self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board')
 
-    # Verify that we have entries for every board, and that we get three
-    # machines back for each board.
+    # Verify that we have entries for every board, and that we get at least
+    # two machines for each board.
     for b in board_list:
       remotes = ef.GetDefaultRemotes(b)
-      if b == 'elm':
-        self.assertEqual(len(remotes), 2)
+      if b == 'daisy':
+        self.assertEqual(len(remotes), 1)
       else:
-        self.assertEqual(len(remotes), 3)
+        self.assertGreaterEqual(len(remotes), 2)
 
 
 if __name__ == '__main__':
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index bd27f28..b4b669a 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -74,8 +74,8 @@
         self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
                               (benchmark.name, benchmark.retries - i))
       elif i > 0:
-        self.logger.LogOutput('benchmark %s succeded after %s retries' %
-                              (benchmark.name, i))
+        self.logger.LogOutput(
+            'benchmark %s succeded after %s retries' % (benchmark.name, i))
         break
       else:
         self.logger.LogOutput(
@@ -90,7 +90,10 @@
         'set -e && '
         # Disable Turbo in Intel pstate driver
         'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
-        'echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; fi; '
+        '  if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo;  then '
+        '    echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; '
+        '  fi; '
+        'fi; '
         # Set governor to performance for each cpu
         'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
         'cd $f; '
@@ -125,23 +128,20 @@
     FILE = '/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py'
     ret = self._ce.CrosRunCommand(
         'ls ' + FILE, machine=machine_name, chromeos_root=chromeos_root)
-    self.logger.LogFatalIf(ret, 'Could not find {} on machine: {}'.format(
-        FILE, machine_name))
+    self.logger.LogFatalIf(
+        ret, 'Could not find {} on machine: {}'.format(FILE, machine_name))
 
     if not ret:
       sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
       ret = self._ce.CrosRunCommand(
           sed_command + FILE, machine=machine_name, chromeos_root=chromeos_root)
-      self.logger.LogFatalIf(ret, 'Could not modify {} on machine: {}'.format(
-          FILE, machine_name))
+      self.logger.LogFatalIf(
+          ret, 'Could not modify {} on machine: {}'.format(FILE, machine_name))
 
-  def RebootMachine(self, machine_name, chromeos_root):
-    command = 'reboot && exit'
+  def RestartUI(self, machine_name, chromeos_root):
+    command = 'stop ui; sleep 5; start ui'
     self._ce.CrosRunCommand(
         command, machine=machine_name, chromeos_root=chromeos_root)
-    time.sleep(60)
-    # Whenever we reboot the machine, we need to restore the governor settings.
-    self.PinGovernorExecutionFrequencies(machine_name, chromeos_root)
 
   def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
     """Run the test_that test.."""
@@ -158,7 +158,8 @@
 
     # We do this because some tests leave the machine in weird states.
     # Rebooting between iterations has proven to help with this.
-    self.RebootMachine(machine, label.chromeos_root)
+        # But the beep is annoying, so we will try restarting the UI instead.
+    self.RestartUI(machine, label.chromeos_root)
 
     autotest_dir = AUTOTEST_DIR
     if label.autotest_path != '':
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 78bdfbd..d7b9e77 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -134,9 +134,9 @@
     self.assertTrue(self.call_telemetry_run)
     self.assertFalse(self.call_test_that_run)
     self.assertFalse(self.call_telemetry_crosperf_run)
-    self.assertEqual(self.telemetry_run_args, [
-        'fake_machine', self.mock_label, self.telemetry_bench, ''
-    ])
+    self.assertEqual(
+        self.telemetry_run_args,
+        ['fake_machine', self.mock_label, self.telemetry_bench, ''])
 
     reset()
     self.runner.Run(machine, self.mock_label, self.test_that_bench, test_args,
@@ -145,9 +145,9 @@
     self.assertFalse(self.call_telemetry_run)
     self.assertTrue(self.call_test_that_run)
     self.assertFalse(self.call_telemetry_crosperf_run)
-    self.assertEqual(self.test_that_args, [
-        'fake_machine', self.mock_label, self.test_that_bench, '', ''
-    ])
+    self.assertEqual(
+        self.test_that_args,
+        ['fake_machine', self.mock_label, self.test_that_bench, '', ''])
 
     reset()
     self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
@@ -171,7 +171,10 @@
         'set -e && '
         # Disable Turbo in Intel pstate driver
         'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
-        'echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; fi; '
+        '  if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo;  then '
+        '    echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; '
+        '  fi; '
+        'fi; '
         # Set governor to performance for each cpu
         'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
         'cd $f; '
@@ -191,11 +194,10 @@
 
     self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
     self.runner.PinGovernorExecutionFrequencies = FakePinGovernor
-    self.runner.RebootMachine('lumpy1.cros', '/tmp/chromeos')
+    self.runner.RestartUI('lumpy1.cros', '/tmp/chromeos')
     self.assertEqual(mock_cros_runcmd.call_count, 1)
-    self.assertEqual(mock_cros_runcmd.call_args_list[0][0], ('reboot && exit',))
-    self.assertEqual(mock_sleep.call_count, 1)
-    self.assertEqual(mock_sleep.call_args_list[0][0], (60,))
+    self.assertEqual(mock_cros_runcmd.call_args_list[0][0],
+                     ('stop ui; sleep 5; start ui',))
 
   @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
   @mock.patch.object(command_executer.CommandExecuter,
@@ -228,7 +230,7 @@
     self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
     res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
                                     self.test_that_bench, '--iterations=2', '')
-    self.assertEqual(mock_cros_runcmd.call_count, 1)
+    self.assertEqual(mock_cros_runcmd.call_count, 2)
     self.assertEqual(mock_chroot_runcmd.call_count, 1)
     self.assertEqual(res, 0)
     self.assertEqual(mock_cros_runcmd.call_args_list[0][0],
diff --git a/debug_info_test/check_cus.py b/debug_info_test/check_cus.py
new file mode 100644
index 0000000..f68fe9c
--- /dev/null
+++ b/debug_info_test/check_cus.py
@@ -0,0 +1,67 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+
+import check_ngcc
+
+cu_checks = [check_ngcc.not_by_gcc]
+
+def check_compile_unit(dso_path, producer, comp_path):
+    """check all compiler flags used to build the compile unit.
+
+    Args:
+        dso_path: path to the elf/dso
+        producer: DW_AT_producer contains the compiler command line.
+        comp_path: DW_AT_comp_dir + DW_AT_name
+
+    Returns:
+        A set of failed tests.
+    """
+    failed = set()
+    for c in cu_checks:
+        if not c(dso_path, producer, comp_path):
+            failed.add(c.__module__)
+
+    return failed
+
+def check_compile_units(dso_path):
+    """check all compile units in the given dso.
+
+    Args:
+        dso_path: path to the dso
+    Return:
+        True if everything looks fine otherwise False.
+    """
+
+    failed = set()
+    producer = ''
+    comp_path = ''
+
+    readelf = subprocess.Popen(['readelf', '--debug-dump=info',
+                                '--dwarf-depth=1', dso_path],
+                                stdout=subprocess.PIPE,
+                                stderr=open(os.devnull, 'w'))
+    for l in readelf.stdout:
+        if 'DW_TAG_compile_unit' in l:
+            if producer:
+                failed = failed.union(check_compile_unit(dso_path, producer,
+                                                         comp_path))
+            producer = ''
+            comp_path = ''
+        elif 'DW_AT_producer' in l:
+            producer = l
+        elif 'DW_AT_name' in l:
+            comp_path = os.path.join(comp_path, l.split(':')[-1].strip())
+        elif 'DW_AT_comp_dir' in l:
+            comp_path = os.path.join(l.split(':')[-1].strip(), comp_path)
+    if producer:
+        failed = failed.union(check_compile_unit(dso_path, producer, comp_path))
+
+    if failed:
+        print('%s failed check: %s' % (dso_path, ' '.join(failed)))
+        return False
+
+    return True
diff --git a/debug_info_test/check_exist.py b/debug_info_test/check_exist.py
new file mode 100644
index 0000000..5e7cce1
--- /dev/null
+++ b/debug_info_test/check_exist.py
@@ -0,0 +1,90 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+
+from whitelist import is_whitelisted
+
+def check_debug_info(dso_path, readelf_content):
+    """check whether debug info section exists in the elf file.
+
+    Args:
+        readelf_content: debug info dumped by the readelf command
+
+    Returns:
+        True if debug info section exists, otherwise False.
+    """
+
+    # Return True if it is whitelisted
+    if is_whitelisted('exist_debug_info', dso_path):
+        return True
+
+    for l in readelf_content:
+        if 'debug_info' in l:
+          return True
+    return False
+
+def check_producer(dso_path, readelf_content):
+    """check whether DW_AT_producer exists in each compile unit.
+
+    Args:
+        readelf_content: debug info dumped by the readelf command
+
+    Returns:
+        True if DW_AT_producer exists in each compile unit, otherwise False.
+        Notice: If no compile unit in DSO, also return True.
+    """
+
+    # Return True if it is whitelisted
+    if is_whitelisted('exist_producer', dso_path):
+        return True
+
+    # Indicate if there is a producer under each cu
+    cur_producer = False
+
+    first_cu = True
+    producer_exist = True
+
+    for l in readelf_content:
+        if 'DW_TAG_compile_unit' in l:
+            if not first_cu and not cur_producer:
+                producer_exist = False
+                break
+            first_cu = False
+            cur_producer = False
+        elif 'DW_AT_producer' in l:
+            cur_producer = True
+
+    # Check whether last producer of compile unit exists in the elf,
+    # also return True if no cu in the DSO.
+    if not first_cu and not cur_producer:
+        producer_exist = False
+
+    return producer_exist
+
+def check_exist_all(dso_path):
+    """check whether intended components exists in the given dso.
+
+    Args:
+        dso_path: path to the dso
+    Return:
+        True if everything looks fine otherwise False.
+    """
+
+    readelf = subprocess.Popen(['readelf', '--debug-dump=info',
+                                '--dwarf-depth=1', dso_path],
+                                stdout=subprocess.PIPE,
+                                stderr=open(os.devnull, 'w'))
+    readelf_content = list(readelf.stdout)
+
+    exist_checks = [check_debug_info, check_producer]
+
+    for e in exist_checks:
+        if not e(dso_path, readelf_content):
+            check_failed = e.__module__ + ': ' + e.__name__
+            print('%s failed check: %s' % (dso_path, check_failed))
+            return False
+
+    return True
diff --git a/debug_info_test/check_icf.py b/debug_info_test/check_icf.py
new file mode 100644
index 0000000..4ac67db
--- /dev/null
+++ b/debug_info_test/check_icf.py
@@ -0,0 +1,47 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import subprocess
+
+def check_identical_code_folding(dso_path):
+    """check whether chrome was built with identical code folding.
+
+    Args:
+        dso_path: path to the dso
+    Return:
+        False if the dso is chrome and it was not built with icf,
+        True otherwise.
+    """
+
+    if not dso_path.endswith('/chrome.debug'):
+        return True
+
+    # Run 'nm' on the chrome binary and read the output.
+    nm = subprocess.Popen(['nm', dso_path],
+                          stdout=subprocess.PIPE,
+                          stderr=open(os.devnull, 'w'))
+    nm_output, _ = nm.communicate()
+
+    # Search for addresses of text symbols.
+    text_addresses = re.findall('^[0-9a-f]+[ ]+[tT] ',
+                                nm_output,
+                                re.MULTILINE)
+
+    # Calculate number of text symbols in chrome binary.
+    num_text_addresses = len(text_addresses)
+
+    # Calculate number of unique text symbols in chrome binary.
+    num_unique_text_addresses = len(set(text_addresses))
+
+    # Check that the number of duplicate symbols is at least 10,000.
+    #   - https://crbug.com/813272#c18
+    if num_text_addresses-num_unique_text_addresses >= 10000:
+        return True
+
+    print('%s was not built with ICF' % dso_path)
+    print('    num_text_addresses = %d' % num_text_addresses)
+    print('    num_unique_text_addresses = %d' % num_unique_text_addresses)
+    return False
diff --git a/debug_info_test/check_ngcc.py b/debug_info_test/check_ngcc.py
new file mode 100644
index 0000000..eecbb85
--- /dev/null
+++ b/debug_info_test/check_ngcc.py
@@ -0,0 +1,26 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from whitelist import is_whitelisted
+
+def not_by_gcc(dso_path, producer, comp_path):
+    """Check whether the compile unit is not built by gcc.
+
+    Args:
+        dso_path: path to the elf/dso
+        producer: DW_AT_producer contains the compiler command line.
+        comp_path: DW_AT_comp_dir + DW_AT_name
+
+    Returns:
+        False if compiled by gcc otherwise True
+    """
+    if is_whitelisted('ngcc_comp_path', comp_path):
+        return True
+
+    if is_whitelisted('ngcc_dso_path', dso_path):
+        return True
+
+    if 'GNU C' in producer:
+        return False
+    return True
diff --git a/debug_info_test/debug_info_test.py b/debug_info_test/debug_info_test.py
new file mode 100755
index 0000000..f5afd64
--- /dev/null
+++ b/debug_info_test/debug_info_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+
+import check_icf
+import check_cus
+import check_exist
+
+elf_checks = [check_exist.check_exist_all,
+              check_cus.check_compile_units,
+              check_icf.check_identical_code_folding]
+
+def scanelf(root):
+    """find ELFs in root
+
+    Args:
+        root: root dir to start with the search.
+    Returns:
+        Filenames of ELFs in root.
+    """
+    p = subprocess.Popen(['scanelf', '-y', '-B', '-F', '%F', '-R', root],
+                         stdout=subprocess.PIPE)
+    return [l.strip() for l in p.stdout]
+
+def Main(argv):
+    if len(argv) < 2:
+        print('usage: %s [file|dir]')
+        return 1
+
+    files = []
+    cand = argv[1]
+    if os.path.isfile(cand):
+        files = [cand]
+    elif os.path.isdir(cand):
+        files = scanelf(cand)
+    else:
+        print('usage: %s [file|dir]')
+        return 1
+
+    failed = False
+    for f in files:
+        for c in elf_checks:
+            if not c(f):
+                failed = True
+
+    if failed:
+        return 1
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(Main(sys.argv))
diff --git a/debug_info_test/exist_debug_info.whitelist b/debug_info_test/exist_debug_info.whitelist
new file mode 100644
index 0000000..417607c
--- /dev/null
+++ b/debug_info_test/exist_debug_info.whitelist
@@ -0,0 +1,12 @@
+# To hide existing failures that some DSOs may have no debug info.
+.*/usr/bin/memdiskfind\.debug
+.*/usr/bin/isohybrid\.debug
+.*/usr/bin/gethostip\.debug
+.*/usr/lib.*/libevent-.*\.so.*\.debug
+.*/usr/lib.*/libcares\.so.*\.debug
+.*/usr/lib64/libdcerpc-samr\.so.*\.debug
+.*/usr/lib64/libGLESv2\.so.*\.debug
+.*/usr/lib64/python2.7/site-packages/selenium/webdriver/firefox/.*/x_ignore_nofocus\.so\.debug
+.*/lib.*/libiptc\.so.*\.debug
+.*/autotest/.*\.debug
+# todos:
diff --git a/debug_info_test/exist_producer.whitelist b/debug_info_test/exist_producer.whitelist
new file mode 100644
index 0000000..ee75de7
--- /dev/null
+++ b/debug_info_test/exist_producer.whitelist
@@ -0,0 +1,8 @@
+# To hide existing failures that producer not in certain compiler units.
+.*/opt/google/chrome/libosmesa\.so\.debug
+.*/opt/google/chrome/chrome-sandbox\.debug
+.*/opt/google/chrome/chrome\.debug
+.*/opt/google/chrome/libosmesa\.so\.debug
+.*/opt/google/chrome/nacl_helper\.debug
+.*/usr/local/chromedriver/chromedriver\.debug
+# todos:
diff --git a/debug_info_test/ngcc_comp_path.whitelist b/debug_info_test/ngcc_comp_path.whitelist
new file mode 100644
index 0000000..45c5b4a
--- /dev/null
+++ b/debug_info_test/ngcc_comp_path.whitelist
@@ -0,0 +1,24 @@
+# CrOS packages are compiled in /tmp/$board/portage/${CATEGORY}/${P}.
+# They can be matched by .*/portage/${CATEGORY}/${PN}-.*
+.*/portage/chromeos-base/ec-utils-.*
+.*/portage/dev-libs/elfutils-.*
+.*/portage/dev-libs/libusb-.*
+.*/portage/dev-util/perf-.*
+.*/portage/media-libs/arc-cros-gralloc-.*
+.*/portage/media-video/yavta-.*
+.*/portage/sys-apps/cavium-n3fips-driver-.*
+.*/portage/sys-apps/cavium-n3fips-tools-.*
+.*/portage/sys-apps/busybox-.*
+.*/portage/sys-apps/snaggletooth-drivers-.*
+.*/portage/sys-boot/syslinux-.*
+.*/portage/sys-kernel/chromeos-kernel-.*
+.*/portage/sys-kernel/fzm-kmod.*
+.*/portage/sys-kernel/kernel-.*
+.*/portage/sys-kernel/.*-kernel.*
+.*/portage/sys-kernel/ti-uio-module-drv-.*
+.*/portage/sys-libs/gcc-libs-.*
+# glibc and libgcc are built in different ways.
+# and libstdc++.
+.*/glibc-.*/
+.*/libgcc/.*
+.*/libstdc\+\+-.*
diff --git a/debug_info_test/ngcc_dso_path.whitelist b/debug_info_test/ngcc_dso_path.whitelist
new file mode 100644
index 0000000..8d63a52
--- /dev/null
+++ b/debug_info_test/ngcc_dso_path.whitelist
@@ -0,0 +1,23 @@
+# DSOs specified here are not CrOS packages compiled within CrOS SDK.
+# CrOS packages should be whitelisted in *_comp_path.whitelist
+# modules we don't care:
+.*/binutils/.*
+.*/binutils-bin/.*
+.*/boot/u-boot\.debug
+.*/boot/vmlinux\.debug
+.*/uboot/u-boot\.debug
+.*/firmware/u-boot\.debug
+.*/libpepflashplayer\.so\.debug
+.*/opt/punybench/bin/.*
+.*/opt/scribe/grpc/_cython/cygrpc\.so\.debug
+.*/telemetry_dep/.*
+.*/unixbench/.*
+.*/usr/bin/core_collector32\.debug
+.*/usr/bin/kubelet\.debug
+.*/usr/lib/libatomic.so.*\.debug
+.*/usr/lib/librk_aiq\.so\.debug
+.*/usr/local/build/autotest/client/site_tests/factory_Leds/src/ec_ctl\.debug
+.*/opt/google/containers/android/.*
+.*/libmali\.so.*\.debug
+.*/pyelftools/examples/sample_exe64\.elf\.debug
+# todos:
diff --git a/debug_info_test/whitelist.py b/debug_info_test/whitelist.py
new file mode 100644
index 0000000..383fcc3
--- /dev/null
+++ b/debug_info_test/whitelist.py
@@ -0,0 +1,57 @@
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import glob
+import re
+
+# Matching a string of length m in an NFA of size n is O(mn^2), but the
+# performance also depends largely on the implementation. It appears to be fast
+# enough according to the tests.
+#
+# The performance bottleneck of this script is readelf. Unless this becomes
+# slower than readelf, don't waste time here.
+def is_whitelisted(list_name, pattern):
+    """chech whether the given pattern is specified in the whitelist.
+
+    Args:
+        list_name: name of the whitelist
+        pattern: the target string
+    Returns:
+        True if matched otherwise False
+    """
+    return pattern and whitelists[list_name].match(pattern)
+
+def prepare_whitelist(patterns):
+    """Join and compile the re patterns.
+
+    Args:
+        patterns: regex patterns.
+    Returns:
+        A compiled re object
+    """
+    return re.compile('|'.join(patterns))
+
+def load_whitelists(dirname):
+    """Load whitelists under dirname.
+
+    A whitelist ends with .whitelist.
+
+    Args:
+        dirname: path to the dir.
+    Returns:
+        A dictionary of 'filename' -> whitelist matcher.
+    """
+    wlist = {}
+    for fn in glob.glob(os.path.join(dirname, '*.whitelist')):
+        key = os.path.splitext(os.path.basename(fn))[0]
+        with open(fn, 'r') as f:
+            patterns = f.read().splitlines()
+            patterns = [l for l in patterns if l != '']
+            patterns = [l for l in patterns if l[0] != '#']
+        wlist[key] = prepare_whitelist(patterns)
+    return wlist
+
+
+whitelists = load_whitelists(os.path.dirname(__file__))
diff --git a/generate-waterfall-reports.py b/generate-waterfall-reports.py
index 8a80905..a67cd6c 100755
--- a/generate-waterfall-reports.py
+++ b/generate-waterfall-reports.py
@@ -39,11 +39,9 @@
 # The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
 # LISTED IN THE REPORT.
 WATERFALL_BUILDERS = [
-    'amd64-gcc-toolchain', 'arm-gcc-toolchain', 'arm64-gcc-toolchain',
-    'x86-gcc-toolchain', 'amd64-llvm-toolchain', 'arm-llvm-toolchain',
-    'arm64-llvm-toolchain', 'x86-llvm-toolchain', 'amd64-llvm-next-toolchain',
-    'arm-llvm-next-toolchain', 'arm64-llvm-next-toolchain',
-    'x86-llvm-next-toolchain'
+    'amd64-llvm-next-toolchain',
+    'arm-llvm-next-toolchain',
+    'arm64-llvm-next-toolchain',
 ]
 
 DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
@@ -81,11 +79,10 @@
   return date_str
 
 
-def EmailReport(report_file, report_type, date):
+def EmailReport(report_file, report_type, date, email_to):
   subject = '%s Waterfall Summary report, %s' % (report_type, date)
-  email_to = getpass.getuser()
   sendgmr_path = '/google/data/ro/projects/gws-sre/sendgmr'
-  command = ('%s --to=%s@google.com --subject="%s" --body_file=%s' %
+  command = ('%s --to=%s --subject="%s" --body_file=%s' %
              (sendgmr_path, email_to, subject, report_file))
   command_executer.GetCommandExecuter().RunCommand(command)
 
@@ -526,6 +523,7 @@
      and calls RecordFailures, to update our test failure data.
   """
 
+  print('Parsing file %s' % log_file)
   lines = []
   with open(log_file, 'r') as infile:
     lines = infile.readlines()
@@ -705,7 +703,13 @@
     conflicting_failure_options = True
     parser.error('Cannot specify both --failures_report and --omit_failures.')
 
-  return not too_many_options and not conflicting_failure_options
+  email_ok = True
+  if options.email and options.email.find('@') == -1:
+    email_ok = False
+    parser.error('"%s" is not a valid email address; it must contain "@..."' %
+                 options.email)
+
+  return not too_many_options and not conflicting_failure_options and email_ok
 
 
 def Main(argv):
@@ -748,6 +752,11 @@
       default=0,
       type=int,
       help='The date YYYYMMDD of waterfall report.')
+  parser.add_argument(
+      '--email',
+      dest='email',
+      default='',
+      help='Email address to use for sending the report.')
 
   options = parser.parse_args(argv)
 
@@ -793,6 +802,8 @@
         test_summary, report_date, board, tmp_date, color = ParseLogFile(
             target, test_data_dict, failure_dict, test, builder, buildnum,
             build_link)
+        if not test_summary:
+          continue
 
         if tmp_date != 0:
           int_date = tmp_date
@@ -806,20 +817,25 @@
 
   PruneOldFailures(failure_dict, int_date)
 
+  if options.email:
+    email_to = options.email
+  else:
+    email_to = getpass.getuser()
+
   if waterfall_report_dict and not rotating_only and not failures_report:
     main_report = GenerateWaterfallReport(waterfall_report_dict, failure_dict,
                                           'main', int_date, omit_failures)
-    EmailReport(main_report, 'Main', format_date(int_date))
+    EmailReport(main_report, 'Main', format_date(int_date), email_to)
     shutil.copy(main_report, ARCHIVE_DIR)
   if rotating_report_dict and not main_only and not failures_report:
     rotating_report = GenerateWaterfallReport(
         rotating_report_dict, failure_dict, 'rotating', int_date, omit_failures)
-    EmailReport(rotating_report, 'Rotating', format_date(int_date))
+    EmailReport(rotating_report, 'Rotating', format_date(int_date), email_to)
     shutil.copy(rotating_report, ARCHIVE_DIR)
 
   if failures_report:
     failures_report = GenerateFailuresReport(failure_dict, int_date)
-    EmailReport(failures_report, 'Failures', format_date(int_date))
+    EmailReport(failures_report, 'Failures', format_date(int_date), email_to)
     shutil.copy(failures_report, ARCHIVE_DIR)
 
   if not options.no_update:
diff --git a/go/android/adb_shamu b/go/android/adb_marlin
similarity index 74%
rename from go/android/adb_shamu
rename to go/android/adb_marlin
index 1c53ecc..476e660 100755
--- a/go/android/adb_shamu
+++ b/go/android/adb_marlin
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# This wrapper runs adb with the serial number of the shamu device.
+# This wrapper runs adb with the serial number of the marlin device.
 # Replace XXXXXXXX with the actual serial number of the device.
 # This is just an example. Create one such wrapper for each Android
 # device used for running Go tests.
diff --git a/go/android/adb_marlin32 b/go/android/adb_marlin32
new file mode 120000
index 0000000..9cdd321
--- /dev/null
+++ b/go/android/adb_marlin32
@@ -0,0 +1 @@
+adb_marlin
\ No newline at end of file
diff --git a/go/android/adb_volantis b/go/android/adb_volantis
deleted file mode 100755
index 4712eec..0000000
--- a/go/android/adb_volantis
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# This wrapper runs adb with the serial number of the volantis device.
-# Replace YYYYYYYY with the actual serial number of the device.
-# This is just an example. Create one such wrapper for each Android
-# device used for running Go tests.
-
-exec adb -s YYYYYYYY "$@"
diff --git a/go/android/build_go b/go/android/build_go
index 65b7ec2..ecb3bee 100755
--- a/go/android/build_go
+++ b/go/android/build_go
@@ -15,12 +15,12 @@
 
 # Build the Go toolchain for arm devices.
 GOOS="android" GOARCH="arm" CGO_ENABLED="1" \
-	CC_FOR_TARGET="arm-linux-androideabi-gcc" \
-	CXX_FOR_TARGET="arm-linux-androideabi-g++" \
+	CC_FOR_TARGET="arm-linux-androideabi-clang" \
+	CXX_FOR_TARGET="arm-linux-androideabi-clang++" \
 	./make.bash --no-clean
 
 # Build the Go toolchain for arm64 devices.
 GOOS="android" GOARCH="arm64" CGO_ENABLED="1" \
-	CC_FOR_TARGET="aarch64-linux-android-gcc" \
-	CXX_FOR_TARGET="aarch64-linux-android-g++" \
+	CC_FOR_TARGET="aarch64-linux-android-clang" \
+	CXX_FOR_TARGET="aarch64-linux-android-clang++" \
 	./make.bash --no-clean
diff --git a/go/android/go_marlin b/go/android/go_marlin
new file mode 100755
index 0000000..bfb564f
--- /dev/null
+++ b/go/android/go_marlin
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Invoke the Go cross compiler for marlin.
+# Uses ../go_target to add PIE flags.
+#
+# This is just an example for an arm64 device.
+
+GOOS="android" GOARCH="arm64" CGO_ENABLED="1" \
+	CC="aarch64-linux-android-clang" \
+	CXX="aarch64-linux-android-clang++" \
+	exec go_target "$@"
diff --git a/go/android/go_marlin32 b/go/android/go_marlin32
new file mode 100755
index 0000000..d02dadc
--- /dev/null
+++ b/go/android/go_marlin32
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Invoke the Go cross compiler for marlin32.
+# Uses ../go_target to add PIE flags.
+#
+# This is just an example for an arm device.
+
+GOOS="android" GOARCH="arm" CGO_ENABLED="1" \
+	CC="arm-linux-androideabi-clang" \
+	CXX="arm-linux-androideabi-clang++" \
+	exec go_target "$@"
diff --git a/go/android/go_marlin32_exec b/go/android/go_marlin32_exec
new file mode 100755
index 0000000..ed3fdf4
--- /dev/null
+++ b/go/android/go_marlin32_exec
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# Copy and remotely execute a binary on the marlin32 device.
+#
+# For this to work, the corresponding adb_marlin32 wrapper
+# must exist to tell adb the serial number of the device.
+
+GOOS="android" GOARCH="arm" exec go_target_exec marlin32 "$@"
diff --git a/go/android/go_marlin_exec b/go/android/go_marlin_exec
new file mode 100755
index 0000000..9f4c06d
--- /dev/null
+++ b/go/android/go_marlin_exec
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# Copy and remotely execute a binary on the marlin device.
+#
+# For this to work, the corresponding adb_marlin wrapper
+# must exist to tell adb the serial number of the device.
+
+GOOS="android" GOARCH="arm64" exec go_target_exec marlin "$@"
diff --git a/go/android/go_shamu b/go/android/go_shamu
deleted file mode 100755
index 7e1ffbe..0000000
--- a/go/android/go_shamu
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# Invoke the Go cross compiler for shamu.
-# Uses ../go_target to add PIE flags.
-#
-# This is just an example for an arm device.
-
-GOOS="android" GOARCH="arm" CGO_ENABLED="1" \
-	CC="arm-linux-androideabi-gcc" \
-	CXX="arm-linux-androideabi-g++" \
-	exec go_target "$@"
diff --git a/go/android/go_shamu_exec b/go/android/go_shamu_exec
deleted file mode 100755
index 2c16902..0000000
--- a/go/android/go_shamu_exec
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# Copy and remotely execute a binary on the shamu device.
-#
-# For this to work, the corresponding adb_shamu wrapper
-# must exist to tell adb the serial number of the device.
-
-GOOS="android" GOARCH="arm" exec go_target_exec shamu "$@"
diff --git a/go/android/go_volantis b/go/android/go_volantis
deleted file mode 100755
index bfeab19..0000000
--- a/go/android/go_volantis
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# Invoke the Go cross compiler for volantis.
-# Uses ../go_target to add PIE flags.
-#
-# This is just an example for an arm64 device.
-
-GOOS="android" GOARCH="arm64" CGO_ENABLED="1" \
-	CC="aarch64-linux-android-gcc" \
-	CXX="aarch64-linux-android-g++" \
-	exec go_target "$@"
diff --git a/go/android/go_volantis_exec b/go/android/go_volantis_exec
deleted file mode 100755
index 86cb2cf..0000000
--- a/go/android/go_volantis_exec
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# Copy and remotely execute a binary on the volantis device.
-#
-# For this to work, the corresponding adb_volantis wrapper
-# must exist to tell adb the serial number of the device.
-
-GOOS="android" GOARCH="arm64" exec go_target_exec volantis "$@"
diff --git a/go/android/target_cp b/go/android/target_cp
index 8a31153..f6cd5cb 100755
--- a/go/android/target_cp
+++ b/go/android/target_cp
@@ -22,7 +22,7 @@
 
 if [[ -d ${src} ]]
 then
-	adb_${target} push ${src} ${dest}/${src##*/} 2>/dev/null
+	adb_${target} push ${src} ${dest}/${src##*/} >/dev/null
 else
-	adb_${target} push ${src} ${dest} 2>/dev/null
+	adb_${target} push ${src} ${dest} >/dev/null
 fi
diff --git a/go/chromeos/build_go b/go/chromeos/build_go
index cb882ea..164ea3f 100755
--- a/go/chromeos/build_go
+++ b/go/chromeos/build_go
@@ -6,8 +6,8 @@
 # Usage: build_go
 #
 # It assumes that the "x86_64-cros-linux-gnu" toolchain is already installed.
-# It assumes that the "i686-pc-linux-gnu" toolchain is already installed.
 # It assumes that the "armv7a-cros-linux-gnueabi" toolchain is already installed.
+# It assumes that the "aarch64-cros-linux-gnu" toolchain is already installed.
 
 if [[ ! -e "make.bash" && -e "src/make.bash" ]]
 then
@@ -16,30 +16,30 @@
 
 # Build the Go toolchain for amd64 targets.
 GOOS="linux" GOARCH="amd64" CGO_ENABLED="1" \
-	CC_FOR_TARGET="x86_64-cros-linux-gnu-gcc" \
-	CXX_FOR_TARGET="x86_64-cros-linux-gnu-g++" \
+	CC_FOR_TARGET="x86_64-cros-linux-gnu-clang" \
+	CXX_FOR_TARGET="x86_64-cros-linux-gnu-clang++" \
 	./make.bash --no-clean
 GOOS="linux" GOARCH="amd64" CGO_ENABLED="1" \
-	CC="x86_64-cros-linux-gnu-gcc" \
-	CXX="x86_64-cros-linux-gnu-g++" \
-	../bin/go install -v -buildmode=pie std
-
-# Build the Go toolchain for 386 targets.
-GOOS="linux" GOARCH="386" CGO_ENABLED="1" \
-	CC_FOR_TARGET="i686-pc-linux-gnu-gcc" \
-	CXX_FOR_TARGET="i686-pc-linux-gnu-g++" \
-	./make.bash --no-clean
-GOOS="linux" GOARCH="386" CGO_ENABLED="1" \
-	CC="i686-pc-linux-gnu-gcc" \
-	CXX="i686-pc-linux-gnu-g++" \
+	CC="x86_64-cros-linux-gnu-clang" \
+	CXX="x86_64-cros-linux-gnu-clang++" \
 	../bin/go install -v -buildmode=pie std
 
 # Build the Go toolchain for arm targets.
 GOOS="linux" GOARCH="arm" CGO_ENABLED="1" \
-	CC_FOR_TARGET="armv7a-cros-linux-gnueabi-gcc" \
-	CXX_FOR_TARGET="armv7a-cros-linux-gnueabi-g++" \
+	CC_FOR_TARGET="armv7a-cros-linux-gnueabi-clang" \
+	CXX_FOR_TARGET="armv7a-cros-linux-gnueabi-clang++" \
 	./make.bash --no-clean
 GOOS="linux" GOARCH="arm" CGO_ENABLED="1" \
-	CC="armv7a-cros-linux-gnueabi-gcc" \
-	CXX="armv7a-cros-linux-gnueabi-g++" \
+	CC="armv7a-cros-linux-gnueabi-clang" \
+	CXX="armv7a-cros-linux-gnueabi-clang++" \
+	../bin/go install -v -buildmode=pie std
+
+# Build the Go toolchain for arm64 targets.
+GOOS="linux" GOARCH="arm64" CGO_ENABLED="1" \
+	CC_FOR_TARGET="aarch64-cros-linux-gnu-clang" \
+	CXX_FOR_TARGET="aarch64-cros-linux-gnu-clang++" \
+	./make.bash --no-clean
+GOOS="linux" GOARCH="arm64" CGO_ENABLED="1" \
+	CC="aarch64-cros-linux-gnu-clang" \
+	CXX="aarch64-cros-linux-gnu-clang++" \
 	../bin/go install -v -buildmode=pie std
diff --git a/go/chromeos/go_chell b/go/chromeos/go_chell
new file mode 100755
index 0000000..ca6a7db
--- /dev/null
+++ b/go/chromeos/go_chell
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Invoke the Go cross compiler for chell.
+# Uses ../go_target to add PIE flags.
+#
+# This is just an example for an amd64 board.
+
+GOOS="linux" GOARCH="amd64" CGO_ENABLED="1" \
+	CC="x86_64-cros-linux-gnu-clang" \
+	CXX="x86_64-cros-linux-gnu-clang++" \
+	exec go_target "$@"
diff --git a/go/chromeos/go_daisy_exec b/go/chromeos/go_chell_exec
similarity index 61%
copy from go/chromeos/go_daisy_exec
copy to go/chromeos/go_chell_exec
index 3b9a63d..8fac94b 100755
--- a/go/chromeos/go_daisy_exec
+++ b/go/chromeos/go_chell_exec
@@ -1,10 +1,12 @@
 #!/bin/bash
 
-# Copy and remotely execute a binary on the daisy device.
+# Copy and remotely execute a binary on the chell device.
 #
 # For this to work, the corresponding entry must exist in
 # ~/.ssh/config and the device must already be setup for
 # password-less authentication. See setup instructions at
 # http://go/chromeos-toolchain-team/go-toolchain
 
-GOOS="linux" GOARCH="arm" exec go_target_exec daisy "$@"
+GOOS="linux" GOARCH="amd64" \
+	GOLOADER="/tmp/glibc/ld.so" \
+	exec go_target_exec chell "$@"
diff --git a/go/chromeos/go_daisy b/go/chromeos/go_daisy
deleted file mode 100755
index db4a95a..0000000
--- a/go/chromeos/go_daisy
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# Invoke the Go cross compiler for daisy.
-# Uses ../go_target to add PIE flags.
-#
-# This is just an example for an arm board.
-
-GOOS="linux" GOARCH="arm" CGO_ENABLED="1" \
-	CC="armv7a-cros-linux-gnueabi-gcc" \
-	CXX="armv7a-cros-linux-gnueabi-g++" \
-	exec go_target "$@"
diff --git a/go/chromeos/go_elm b/go/chromeos/go_elm
new file mode 100755
index 0000000..a92d9c6
--- /dev/null
+++ b/go/chromeos/go_elm
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Invoke the Go cross compiler for elm.
+# Uses ../go_target to add PIE flags.
+#
+# This is just an example for an arm64 board.
+
+GOOS="linux" GOARCH="arm64" CGO_ENABLED="1" \
+	CC="aarch64-cros-linux-gnu-clang" \
+	CXX="aarch64-cros-linux-gnu-clang++" \
+	exec go_target "$@"
diff --git a/go/chromeos/go_elm32 b/go/chromeos/go_elm32
new file mode 100755
index 0000000..2bcb3f3
--- /dev/null
+++ b/go/chromeos/go_elm32
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Invoke the Go cross compiler for elm32.
+# Uses ../go_target to add PIE flags.
+#
+# This is just an example for an arm board.
+
+GOOS="linux" GOARCH="arm" CGO_ENABLED="1" \
+	CC="armv7a-cros-linux-gnueabi-clang" \
+	CXX="armv7a-cros-linux-gnueabi-clang++" \
+	exec go_target "$@"
diff --git a/go/chromeos/go_daisy_exec b/go/chromeos/go_elm32_exec
similarity index 61%
copy from go/chromeos/go_daisy_exec
copy to go/chromeos/go_elm32_exec
index 3b9a63d..3e115a9 100755
--- a/go/chromeos/go_daisy_exec
+++ b/go/chromeos/go_elm32_exec
@@ -1,10 +1,12 @@
 #!/bin/bash
 
-# Copy and remotely execute a binary on the daisy device.
+# Copy and remotely execute a binary on the elm32 device.
 #
 # For this to work, the corresponding entry must exist in
 # ~/.ssh/config and the device must already be setup for
 # password-less authentication. See setup instructions at
 # http://go/chromeos-toolchain-team/go-toolchain
 
-GOOS="linux" GOARCH="arm" exec go_target_exec daisy "$@"
+GOOS="linux" GOARCH="arm" \
+	GOLOADER="/tmp/glibc32/ld.so" \
+	exec go_target_exec elm32 "$@"
diff --git a/go/chromeos/go_daisy_exec b/go/chromeos/go_elm_exec
similarity index 61%
rename from go/chromeos/go_daisy_exec
rename to go/chromeos/go_elm_exec
index 3b9a63d..da244c2 100755
--- a/go/chromeos/go_daisy_exec
+++ b/go/chromeos/go_elm_exec
@@ -1,10 +1,12 @@
 #!/bin/bash
 
-# Copy and remotely execute a binary on the daisy device.
+# Copy and remotely execute a binary on the elm device.
 #
 # For this to work, the corresponding entry must exist in
 # ~/.ssh/config and the device must already be setup for
 # password-less authentication. See setup instructions at
 # http://go/chromeos-toolchain-team/go-toolchain
 
-GOOS="linux" GOARCH="arm" exec go_target_exec daisy "$@"
+GOOS="linux" GOARCH="arm64" \
+	GOLOADER="/tmp/glibc/ld.so" \
+	exec go_target_exec elm "$@"
diff --git a/go/chromeos/go_panther b/go/chromeos/go_panther
deleted file mode 100755
index 5c06f43..0000000
--- a/go/chromeos/go_panther
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# Invoke the Go cross compiler for panther.
-# Uses ../go_target to add PIE flags.
-#
-# This is just an example for an amd64 board.
-
-GOOS="linux" GOARCH="amd64" CGO_ENABLED="1" \
-	CC="x86_64-cros-linux-gnu-gcc" \
-	CXX="x86_64-cros-linux-gnu-g++" \
-	exec go_target "$@"
diff --git a/go/chromeos/go_panther_exec b/go/chromeos/go_panther_exec
deleted file mode 100755
index 64f77b1..0000000
--- a/go/chromeos/go_panther_exec
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# Copy and remotely execute a binary on the panther device.
-#
-# For this to work, the corresponding entry must exist in
-# ~/.ssh/config and the device must already be setup for
-# password-less authentication. See setup instructions at
-# http://go/chromeos-toolchain-team/go-toolchain
-
-GOOS="linux" GOARCH="amd64" exec go_target_exec panther "$@"
diff --git a/go/chromeos/go_x86-zgb b/go/chromeos/go_x86-zgb
deleted file mode 100755
index 272efb5..0000000
--- a/go/chromeos/go_x86-zgb
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# Invoke the Go cross compiler for x86-zgb.
-# Uses ../go_target to add PIE flags.
-#
-# This is just an example for an 386 board.
-
-GOOS="linux" GOARCH="386" CGO_ENABLED="1" \
-	CC="i686-pc-linux-gnu-gcc" \
-	CXX="i686-pc-linux-gnu-g++" \
-	exec go_target "$@"
diff --git a/go/chromeos/go_x86-zgb_exec b/go/chromeos/go_x86-zgb_exec
deleted file mode 100755
index b0341f2..0000000
--- a/go/chromeos/go_x86-zgb_exec
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# Copy and remotely execute a binary on the x86-zgb device.
-#
-# For this to work, the corresponding entry must exist in
-# ~/.ssh/config and the device must already be setup for
-# password-less authentication. See setup instructions at
-# http://go/chromeos-toolchain-team/go-toolchain
-
-GOOS="linux" GOARCH="386" exec go_target_exec x86-zgb "$@"
diff --git a/go/chromeos/push_glibc b/go/chromeos/push_glibc
new file mode 100755
index 0000000..7528284
--- /dev/null
+++ b/go/chromeos/push_glibc
@@ -0,0 +1,56 @@
+#!/bin/bash
+set -e -o pipefail
+
+# This script copies a locally built GLIBC to a remote device.
+#
+# Usage: push_glibc <target>...
+#
+# This script works with 64-bit (amd64 or arm64) ChromeOS targets.
+# It copies both 32-bit and 64-bit glibc loaders onto the device.
+# This allows loading and running both 32-bit and 64-bit binaries
+# on the same device.
+
+for target in "$@"
+do
+	echo -n "pushing glibc to ${target} ... "
+	case "$(ssh ${target} uname -m)" in
+		x86_64)
+			glibc="/usr/x86_64-cros-linux-gnu/lib64"
+			loader="ld-linux-x86-64.so.2"
+			glibc32="/usr/i686-pc-linux-gnu/lib"
+			loader32="ld-linux.so.2"
+			;;
+		aarch64)
+			glibc="/usr/aarch64-cros-linux-gnu/lib64"
+			loader="ld-linux-aarch64.so.1"
+			glibc32="/usr/armv7a-cros-linux-gnueabi/lib"
+			loader32="ld-linux-armhf.so.3"
+			;;
+		*)
+			echo "unknown arch"
+			continue
+			;;
+	esac
+
+	target_sh ${target} "rm -rf /tmp/glibc"
+	target_sh ${target} "mkdir -p /tmp/glibc"
+	target_cp "${glibc}" ${target}:/tmp/glibc
+
+	target_sh ${target} "rm -rf /tmp/glibc32"
+	target_sh ${target} "mkdir -p /tmp/glibc32"
+	target_cp "${glibc32}" ${target}:/tmp/glibc32
+
+	echo "#!/bin/bash" > /tmp/ld.so
+	echo "LD_LIBRARY_PATH=/tmp/glibc/${glibc##*/} exec /tmp/glibc/${glibc##*/}/${loader} \"\$@\"" >> /tmp/ld.so
+	chmod +x /tmp/ld.so
+	target_cp /tmp/ld.so ${target}:/tmp/glibc
+	rm /tmp/ld.so
+
+	echo "#!/bin/bash" > /tmp/ld.so
+	echo "LD_LIBRARY_PATH=/tmp/glibc32/${glibc32##*/} exec /tmp/glibc32/${glibc32##*/}/${loader32} \"\$@\"" >> /tmp/ld.so
+	chmod +x /tmp/ld.so
+	target_cp /tmp/ld.so ${target}:/tmp/glibc32
+	rm /tmp/ld.so
+
+	echo "done"
+done
diff --git a/go/chromeos/target_cp b/go/chromeos/target_cp
index 6df476e..8e0c405 100755
--- a/go/chromeos/target_cp
+++ b/go/chromeos/target_cp
@@ -22,7 +22,7 @@
 
 if [[ -d ${src} ]]
 then
-	scp -rq ${src} ${target}:${dest}
+	tar -C $(dirname ${src}) -zcf - $(basename ${src}) | ssh ${target} "tar -C ${dest} -zxf -"
 else
 	scp -q ${src} ${target}:${dest}
 fi
diff --git a/go/go_local b/go/go_local
index 92954ef..cb2a4dc 100755
--- a/go/go_local
+++ b/go/go_local
@@ -3,6 +3,6 @@
 # Invoke the Go compiler for localhost.
 
 GOOS="linux" GOARCH="amd64" CGO_ENABLED="1" \
-	CC="gcc" \
-	CXX="g++" \
+	CC="clang" \
+	CXX="clang++" \
 	exec go "$@"
diff --git a/go/go_target b/go/go_target
index 0578637..8943d81 100755
--- a/go/go_target
+++ b/go/go_target
@@ -4,7 +4,7 @@
 # This script wraps the go cross compilers.
 #
 # It ensures that Go binaries are linked with an external linker
-# by default (cross gcc). Appropriate flags are added to build a
+# by default (cross clang). Appropriate flags are added to build a
 # position independent executable (PIE) for ASLR.
 # "export GOPIE=0" to temporarily disable this behavior.
 
@@ -36,44 +36,28 @@
 	case "$1" in
 		build | install | run | test)
 			# Add "-buildmode=pie" to "go build|install|run|test" commands.
-			pie_flags=(
-				"$1"
-				"-buildmode=pie"
-			)
+			pie_flags=( "$1" )
 			shift
+			[[ "${GOOS}" == "android" ]] || pie_flags+=( "-buildmode=pie" )
 			;;
 		tool)
 			case "$2" in
 				asm)
 					# Handle direct assembler invocations ("go tool asm <args>").
-					pie_flags=(
-						"$1"
-						"$2"
-						"-shared"
-					)
+					pie_flags=( "$1" "$2" "-shared" )
 					shift 2
 					;;
 				compile)
 					# Handle direct compiler invocations ("go tool compile <args>").
-					pie_flags=(
-						"$1"
-						"$2"
-						"-shared"
-						"-installsuffix=shared"
-					)
+					pie_flags=( "$1" "$2" "-shared" )
 					shift 2
+					[[ "${GOOS}" == "android" ]] || pie_flags+=( "-installsuffix=shared" )
 					;;
 				link)
 					# Handle direct linker invocations ("go tool link <args>").
-					pie_flags=(
-						"$1"
-						"$2"
-						"-installsuffix=shared"
-						"-buildmode=pie"
-						"-extld"
-						"${CC}"
-					)
+					pie_flags=( "$1" "$2" "-extld" "${CC}" "-buildmode=pie" )
 					shift 2
+					[[ "${GOOS}" == "android" ]] || pie_flags+=( "-installsuffix=shared" )
 					;;
 			esac
 			;;
diff --git a/go/go_target_exec b/go/go_target_exec
index 34d9e79..0a44b4c 100755
--- a/go/go_target_exec
+++ b/go/go_target_exec
@@ -27,15 +27,15 @@
 goroot="$(go_${target} env GOROOT)"
 if [[ "${PWD}" == ${goroot}/src/* ]]
 then
-	targetdir="${tmpdir}/go/src/${PWD#${goroot}/src/}"
+	targetdir="${tmpdir}/goroot/src/${PWD#${goroot}/src/}"
 fi
 
 # Set GOROOT, and forward some environment variables to the remote shell.
-vars="GOROOT=${tmpdir}/go"
+vars="GOROOT=${tmpdir}/goroot"
 vars+="${GOOS:+ GOOS=${GOOS}}"
 vars+="${GOARCH:+ GOARCH=${GOARCH}}"
 vars+="${GOMAXPROCS:+ GOMAXPROCS=${GOMAXPROCS}}"
 vars+="${GOTRACEBACK:+ GOTRACEBACK=${GOTRACEBACK}}"
 
 # Remotely execute the binary using ssh (for ChromeOS) or adb (for Android).
-target_sh ${target} "cd ${targetdir} && ${vars} ${tmpdir}/a.out $*"
+target_sh ${target} "cd ${targetdir} && ${vars} ${GOLOADER} ${tmpdir}/a.out $*"
diff --git a/go/patch/go0.patch b/go/patch/go0.patch
index 27e1451..c539865 100644
--- a/go/patch/go0.patch
+++ b/go/patch/go0.patch
@@ -2,26 +2,33 @@
 
 --- src/go/build/deps_test.go
 +++ src/go/build/deps_test.go
-@@ -168,7 +168,7 @@ var pkgDeps = map[string][]string{
- 	"testing":          {"L2", "flag", "fmt", "os", "runtime/debug", "runtime/pprof", "runtime/trace", "time"},
+@@ -182,17 +182,17 @@ var pkgDeps = map[string][]string{
+ 	"runtime/debug":  {"L2", "fmt", "io/ioutil", "os", "time"},
+ 	"runtime/pprof":  {"L2", "compress/gzip", "context", "encoding/binary", "fmt", "io/ioutil", "os", "text/tabwriter", "time"},
+ 	"runtime/trace":  {"L0"},
+ 	"text/tabwriter": {"L2"},
+ 
+ 	"testing":          {"L2", "flag", "fmt", "internal/race", "os", "runtime/debug", "runtime/pprof", "runtime/trace", "time"},
  	"testing/iotest":   {"L2", "log"},
- 	"testing/quick":    {"L2", "flag", "fmt", "reflect"},
--	"internal/testenv": {"L2", "os", "testing"},
-+	"internal/testenv": {"L2", "os", "os/exec", "testing"},
+ 	"testing/quick":    {"L2", "flag", "fmt", "reflect", "time"},
+-	"internal/testenv": {"L2", "OS", "flag", "testing", "syscall"},
++	"internal/testenv": {"L2", "OS", "os/exec", "flag", "testing", "syscall"},
  
  	// L4 is defined as L3+fmt+log+time, because in general once
  	// you're using L3 packages, use of fmt, log, or time is not a big deal.
+ 	"L4": {
+ 		"L3",
+ 		"fmt",
+ 		"log",
+ 		"time",
 --- src/internal/testenv/testenv.go
 +++ src/internal/testenv/testenv.go
-@@ -12,6 +12,7 @@ package testenv
- 
- import (
- 	"os"
-+	"os/exec"
- 	"runtime"
- 	"strings"
- 	"testing"
-@@ -36,6 +37,9 @@ func HasGoBuild() bool {
+@@ -43,16 +43,19 @@ func HasGoBuild() bool {
+ 	switch runtime.GOOS {
+ 	case "android", "nacl":
+ 		return false
+ 	case "darwin":
+ 		if strings.HasPrefix(runtime.GOARCH, "arm") {
  			return false
  		}
  	}
@@ -31,3 +38,8 @@
  	return true
  }
  
+ // MustHaveGoBuild checks that the current system can build programs with ``go build''
+ // and then run them with os.StartProcess or exec.Command.
+ // If not, MustHaveGoBuild calls t.Skip with an explanation.
+ func MustHaveGoBuild(t testing.TB) {
+ 	if os.Getenv("GO_GCFLAGS") != "" {
diff --git a/go/patch/go1.patch b/go/patch/go1.patch
index 49d229e..e32268a 100644
--- a/go/patch/go1.patch
+++ b/go/patch/go1.patch
@@ -2,49 +2,66 @@
 
 --- test/chanlinear.go
 +++ test/chanlinear.go
-@@ -1,4 +1,4 @@
+@@ -1,9 +1,9 @@
 -// +build darwin linux
 +// +build darwin linux android
  // run
  
  // Copyright 2014 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Test that dequeueing from a pending channel doesn't
+ // take linear time.
 --- test/fixedbugs/bug385_64.go
 +++ test/fixedbugs/bug385_64.go
-@@ -1,4 +1,4 @@
+@@ -1,9 +1,9 @@
 -// +build amd64
 +// +build amd64 arm64
  // errorcheck
  
- // Copyright 2011 The Go Authors.  All rights reserved.
+ // Copyright 2011 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Issue 2444
+ // Issue 4666: issue with arrays of exactly 4GB.
 --- test/fixedbugs/issue10607.go
 +++ test/fixedbugs/issue10607.go
-@@ -1,4 +1,4 @@
--// +build linux,!ppc64,!ppc64le,!mips64,!mips64le
-+// +build linux,!ppc64,!ppc64le,!mips64,!mips64le android
+@@ -1,9 +1,9 @@
+-// +build linux,!ppc64
++// +build linux,!ppc64 android
  // run
  
  // Copyright 2015 The Go Authors. All rights reserved.
---- test/fixedbugs/issue6036.go
-+++ test/fixedbugs/issue6036.go
-@@ -1,4 +1,4 @@
--// +build amd64
-+// +build amd64 arm64
- // compile
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
  
- // Copyright 2013 The Go Authors.  All rights reserved.
+ // Test that a -B option is passed through when using both internal
+ // and external linking mode.
 --- test/maplinear.go
 +++ test/maplinear.go
-@@ -1,4 +1,4 @@
+@@ -1,9 +1,9 @@
 -// +build darwin linux
 +// +build darwin linux android
  // run
  
  // Copyright 2013 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Test that maps don't go quadratic for NaNs and other values.
+ 
 --- test/recover4.go
 +++ test/recover4.go
-@@ -1,4 +1,4 @@
+@@ -1,9 +1,9 @@
 -// +build linux darwin
 +// +build linux android darwin
  // run
  
- // Copyright 2015 The Go Authors.  All rights reserved.
+ // Copyright 2015 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Test that if a slice access causes a fault, a deferred func
+ // sees the most recent value of the variables it accesses.
diff --git a/go/patch/go2.patch b/go/patch/go2.patch
index dfc236a..20f0479 100644
--- a/go/patch/go2.patch
+++ b/go/patch/go2.patch
@@ -2,10 +2,15 @@
 
 --- test/run.go
 +++ test/run.go
-@@ -37,9 +37,9 @@ var (
+@@ -34,19 +34,19 @@ import (
+ 
+ var (
+ 	verbose        = flag.Bool("v", false, "verbose. if set, parallelism is set to 1.")
+ 	keep           = flag.Bool("k", false, "keep. keep temporary directory.")
  	numParallel    = flag.Int("n", runtime.NumCPU(), "number of parallel tests to run")
  	summary        = flag.Bool("summary", false, "show summary of results")
  	showSkips      = flag.Bool("show_skips", false, "show skipped tests")
+ 	runSkips       = flag.Bool("run_skips", false, "run skipped tests (ignore skip and build tags)")
 -	linkshared     = flag.Bool("linkshared", false, "")
  	updateErrors   = flag.Bool("update_errors", false, "update error messages in test file based on compiler output")
  	runoutputLimit = flag.Int("l", defaultRunOutputLimit(), "number of parallel runoutput tests to run")
@@ -13,29 +18,42 @@
  
  	shard  = flag.Int("shard", 0, "shard index to run. Only applicable if -shards is non-zero.")
  	shards = flag.Int("shards", 0, "number of shards. If 0, all tests are run. This is used by the continuous build.")
-@@ -192,19 +192,11 @@ func goFiles(dir string) []string {
- type runCmd func(...string) ([]byte, error)
+ )
  
- func compileFile(runcmd runCmd, longname string) (out []byte, err error) {
--	cmd := []string{"go", "tool", "compile", "-e"}
--	if *linkshared {
--		cmd = append(cmd, "-dynlink", "-installsuffix=dynlink")
--	}
--	cmd = append(cmd, longname)
--	return runcmd(cmd...)
-+	return runcmd(findGoCmd(), "tool", "compile", "-e", longname)
+ var (
+ 	goos, goarch string
+ 
+@@ -189,48 +189,49 @@ func goFiles(dir string) []string {
+ 	}
+ 	sort.Strings(names)
+ 	return names
  }
  
- func compileInDir(runcmd runCmd, dir string, names ...string) (out []byte, err error) {
--	cmd := []string{"go", "tool", "compile", "-e", "-D", ".", "-I", "."}
+ type runCmd func(...string) ([]byte, error)
+ 
+ func compileFile(runcmd runCmd, longname string, flags []string) (out []byte, err error) {
+-	cmd := []string{"go", "tool", "compile", "-e"}
++	cmd := []string{findGoCmd(), "tool", "compile", "-e"}
+ 	cmd = append(cmd, flags...)
 -	if *linkshared {
 -		cmd = append(cmd, "-dynlink", "-installsuffix=dynlink")
 -	}
+ 	cmd = append(cmd, longname)
+ 	return runcmd(cmd...)
+ }
+ 
+ func compileInDir(runcmd runCmd, dir string, flags []string, names ...string) (out []byte, err error) {
+-	cmd := []string{"go", "tool", "compile", "-e", "-D", ".", "-I", "."}
 +	cmd := []string{findGoCmd(), "tool", "compile", "-e", "-D", ".", "-I", "."}
+ 	cmd = append(cmd, flags...)
+-	if *linkshared {
+-		cmd = append(cmd, "-dynlink", "-installsuffix=dynlink")
+-	}
  	for _, name := range names {
  		cmd = append(cmd, filepath.Join(dir, name))
  	}
-@@ -213,15 +205,21 @@ func compileInDir(runcmd runCmd, dir string, names ...string) (out []byte, err e
+ 	return runcmd(cmd...)
+ }
  
  func linkFile(runcmd runCmd, goname string) (err error) {
  	pfile := strings.Replace(goname, ".go", ".o", -1)
@@ -49,12 +67,13 @@
  	return
  }
  
-+func goRun(runcmd runCmd, goname string, args ...string) (out []byte, err error) {
-+	cmd := []string{findGoCmd(), "run"}
++func goRun(runcmd runCmd, flags []string, goname string, args ...string) (out []byte, err error) {
++	cmd := []string{findGoCmd(), "run", goGcflags()}
 +	if len(findExecCmd()) > 0 {
 +		cmd = append(cmd, "-exec")
 +		cmd = append(cmd, findExecCmd()...)
 +	}
++	cmd = append(cmd, flags...)
 +	cmd = append(cmd, goname)
 +	cmd = append(cmd, args...)
 +	return runcmd(cmd...)
@@ -63,81 +82,230 @@
  // skipError describes why a test was skipped.
  type skipError string
  
-@@ -530,8 +528,7 @@ func (t *test) run() {
+ func (s skipError) Error() string { return string(s) }
+ 
+ func check(err error) {
+ 	if err != nil {
+ 		log.Fatal(err)
+@@ -590,18 +591,17 @@ func (t *test) run() {
+ 
+ 	long := filepath.Join(cwd, t.goFileName())
+ 	switch action {
+ 	default:
  		t.err = fmt.Errorf("unimplemented action %q", action)
  
  	case "errorcheck":
--		cmdline := []string{"go", "tool", "compile", "-e", "-o", "a.o"}
+ 		// TODO(gri) remove need for -C (disable printing of columns in error messages)
+-		cmdline := []string{"go", "tool", "compile", "-C", "-e", "-o", "a.o"}
 -		// No need to add -dynlink even if linkshared if we're just checking for errors...
-+		cmdline := []string{findGoCmd(), "tool", "compile", "-e", "-o", "a.o"}
++		cmdline := []string{findGoCmd(), "tool", "compile", "-C", "-e", "-o", "a.o"}
  		cmdline = append(cmdline, flags...)
  		cmdline = append(cmdline, long)
  		out, err := runcmd(cmdline...)
-@@ -640,19 +637,14 @@ func (t *test) run() {
+ 		if wantError {
+ 			if err == nil {
+ 				t.err = fmt.Errorf("compilation succeeded unexpectedly\n%s", out)
+ 				return
+ 			}
+@@ -704,17 +704,17 @@ func (t *test) run() {
+ 				}
+ 				if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
+ 					t.err = fmt.Errorf("incorrect output\n%s", out)
+ 				}
+ 			}
  		}
  
  	case "build":
--		_, err := runcmd("go", "build", "-o", "a.exe", long)
-+		_, err := runcmd(findGoCmd(), "build", "-o", "a.exe", long)
+-		_, err := runcmd("go", "build", goGcflags(), "-o", "a.exe", long)
++		_, err := runcmd(findGoCmd(), "build", goGcflags(), "-o", "a.exe", long)
  		if err != nil {
  			t.err = err
  		}
  
+ 	case "builddir":
+ 		// Build an executable from all the .go and .s files in a subdirectory.
+ 		useTmp = true
+ 		longdir := filepath.Join(cwd, t.goDirName())
+@@ -730,177 +730,132 @@ func (t *test) run() {
+ 			case ".go":
+ 				gos = append(gos, file)
+ 			case ".s":
+ 				asms = append(asms, file)
+ 			}
+ 
+ 		}
+ 		var objs []string
+-		cmd := []string{"go", "tool", "compile", "-e", "-D", ".", "-I", ".", "-o", "go.o"}
++		cmd := []string{findGoCmd(), "tool", "compile", "-e", "-D", ".", "-I", ".", "-o", "go.o"}
+ 		if len(asms) > 0 {
+ 			cmd = append(cmd, "-asmhdr", "go_asm.h")
+ 		}
+ 		for _, file := range gos {
+ 			cmd = append(cmd, filepath.Join(longdir, file.Name()))
+ 		}
+ 		_, err := runcmd(cmd...)
+ 		if err != nil {
+ 			t.err = err
+ 			break
+ 		}
+ 		objs = append(objs, "go.o")
+ 		if len(asms) > 0 {
+-			cmd = []string{"go", "tool", "asm", "-e", "-I", ".", "-o", "asm.o"}
++			cmd = []string{findGoCmd(), "tool", "asm", "-e", "-I", ".", "-o", "asm.o"}
+ 			for _, file := range asms {
+ 				cmd = append(cmd, filepath.Join(longdir, file.Name()))
+ 			}
+ 			_, err = runcmd(cmd...)
+ 			if err != nil {
+ 				t.err = err
+ 				break
+ 			}
+ 			objs = append(objs, "asm.o")
+ 		}
+-		cmd = []string{"go", "tool", "pack", "c", "all.a"}
++		cmd = []string{findGoCmd(), "tool", "pack", "c", "all.a"}
+ 		cmd = append(cmd, objs...)
+ 		_, err = runcmd(cmd...)
+ 		if err != nil {
+ 			t.err = err
+ 			break
+ 		}
+-		cmd = []string{"go", "tool", "link", "all.a"}
++		cmd = []string{findGoCmd(), "tool", "link", "all.a"}
+ 		_, err = runcmd(cmd...)
+ 		if err != nil {
+ 			t.err = err
+ 			break
+ 		}
+ 
+ 	case "buildrun": // build binary, then run binary, instead of go run. Useful for timeout tests where failure mode is infinite loop.
+ 		// TODO: not supported on NaCl
+ 		useTmp = true
+-		cmd := []string{"go", "build", goGcflags(), "-o", "a.exe"}
+-		if *linkshared {
+-			cmd = append(cmd, "-linkshared")
+-		}
++		cmd := []string{findGoCmd(), "build", goGcflags(), "-o", "a.exe"}
+ 		longdirgofile := filepath.Join(filepath.Join(cwd, t.dir), t.gofile)
+ 		cmd = append(cmd, flags...)
+ 		cmd = append(cmd, longdirgofile)
+ 		out, err := runcmd(cmd...)
+ 		if err != nil {
+ 			t.err = err
+ 			return
+ 		}
+-		cmd = []string{"./a.exe"}
++		cmd = []string{}
++		if len(findExecCmd()) > 0 {
++			cmd = append(cmd, findExecCmd()...)
++		}
++		cmd = append(cmd, "./a.exe")
+ 		out, err = runcmd(append(cmd, args...)...)
+ 		if err != nil {
+ 			t.err = err
+ 			return
+ 		}
+ 
+ 		if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
+ 			t.err = fmt.Errorf("incorrect output\n%s", out)
+ 		}
+ 
  	case "run":
  		useTmp = false
--		cmd := []string{"go", "run"}
--		if *linkshared {
--			cmd = append(cmd, "-linkshared")
+-		var out []byte
+-		var err error
+-		if len(flags)+len(args) == 0 && goGcflags() == "" && !*linkshared {
+-			// If we're not using special go command flags,
+-			// skip all the go command machinery.
+-			// This avoids any time the go command would
+-			// spend checking whether, for example, the installed
+-			// package runtime is up to date.
+-			// Because we run lots of trivial test programs,
+-			// the time adds up.
+-			pkg := filepath.Join(t.tempDir, "pkg.a")
+-			if _, err := runcmd("go", "tool", "compile", "-o", pkg, t.goFileName()); err != nil {
+-				t.err = err
+-				return
+-			}
+-			exe := filepath.Join(t.tempDir, "test.exe")
+-			cmd := []string{"go", "tool", "link", "-s", "-w"}
+-			cmd = append(cmd, "-o", exe, pkg)
+-			if _, err := runcmd(cmd...); err != nil {
+-				t.err = err
+-				return
+-			}
+-			out, err = runcmd(append([]string{exe}, args...)...)
+-		} else {
+-			cmd := []string{"go", "run", goGcflags()}
+-			if *linkshared {
+-				cmd = append(cmd, "-linkshared")
+-			}
+-			cmd = append(cmd, flags...)
+-			cmd = append(cmd, t.goFileName())
+-			out, err = runcmd(append(cmd, args...)...)
 -		}
--		cmd = append(cmd, t.goFileName())
--		out, err := runcmd(append(cmd, args...)...)
-+		out, err := goRun(runcmd, t.goFileName(), args...)
++		out, err := goRun(runcmd, flags, t.goFileName(), args...)
  		if err != nil {
  			t.err = err
  			return
-@@ -667,12 +659,7 @@ func (t *test) run() {
+ 		}
+ 		if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
+ 			t.err = fmt.Errorf("incorrect output\n%s", out)
+ 		}
+ 
+ 	case "runoutput":
+ 		rungatec <- true
+ 		defer func() {
  			<-rungatec
  		}()
  		useTmp = false
--		cmd := []string{"go", "run"}
+-		cmd := []string{"go", "run", goGcflags()}
 -		if *linkshared {
 -			cmd = append(cmd, "-linkshared")
 -		}
 -		cmd = append(cmd, t.goFileName())
 -		out, err := runcmd(append(cmd, args...)...)
-+		out, err := goRun(runcmd, t.goFileName(), args...)
++		out, err := goRun(runcmd, nil, t.goFileName(), args...)
  		if err != nil {
  			t.err = err
  			return
-@@ -682,12 +669,7 @@ func (t *test) run() {
+ 		}
+ 		tfile := filepath.Join(t.tempDir, "tmp__.go")
+ 		if err := ioutil.WriteFile(tfile, out, 0666); err != nil {
  			t.err = fmt.Errorf("write tempfile:%s", err)
  			return
  		}
--		cmd = []string{"go", "run"}
+-		cmd = []string{"go", "run", goGcflags()}
 -		if *linkshared {
 -			cmd = append(cmd, "-linkshared")
 -		}
 -		cmd = append(cmd, tfile)
 -		out, err = runcmd(cmd...)
-+		out, err = goRun(runcmd, tfile)
++		out, err = goRun(runcmd, nil, tfile)
  		if err != nil {
  			t.err = err
  			return
-@@ -698,12 +680,7 @@ func (t *test) run() {
+ 		}
+ 		if string(out) != t.expectedOutput() {
+ 			t.err = fmt.Errorf("incorrect output\n%s", out)
+ 		}
  
  	case "errorcheckoutput":
  		useTmp = false
--		cmd := []string{"go", "run"}
+-		cmd := []string{"go", "run", goGcflags()}
 -		if *linkshared {
 -			cmd = append(cmd, "-linkshared")
 -		}
 -		cmd = append(cmd, t.goFileName())
 -		out, err := runcmd(append(cmd, args...)...)
-+		out, err := goRun(runcmd, t.goFileName(), args...)
++		out, err := goRun(runcmd, nil, t.goFileName(), args...)
  		if err != nil {
  			t.err = err
  			return
-@@ -714,7 +691,7 @@ func (t *test) run() {
+ 		}
+ 		tfile := filepath.Join(t.tempDir, "tmp__.go")
+ 		err = ioutil.WriteFile(tfile, out, 0666)
+ 		if err != nil {
  			t.err = fmt.Errorf("write tempfile:%s", err)
  			return
  		}
@@ -146,7 +314,17 @@
  		cmdline = append(cmdline, flags...)
  		cmdline = append(cmdline, tfile)
  		out, err = runcmd(cmdline...)
-@@ -741,6 +718,10 @@ func findExecCmd() []string {
+ 		if wantError {
+ 			if err == nil {
+ 				t.err = fmt.Errorf("compilation succeeded unexpectedly\n%s", out)
+ 				return
+ 			}
+@@ -917,26 +872,37 @@ func (t *test) run() {
+ 
+ var execCmd []string
+ 
+ func findExecCmd() []string {
+ 	if execCmd != nil {
  		return execCmd
  	}
  	execCmd = []string{} // avoid work the second time
@@ -157,7 +335,10 @@
  	if goos == runtime.GOOS && goarch == runtime.GOARCH {
  		return execCmd
  	}
-@@ -751,6 +732,13 @@ func findExecCmd() []string {
+ 	path, err := exec.LookPath(fmt.Sprintf("go_%s_%s_exec", goos, goarch))
+ 	if err == nil {
+ 		execCmd = []string{path}
+ 	}
  	return execCmd
  }
  
@@ -171,3 +352,8 @@
  func (t *test) String() string {
  	return filepath.Join(t.dir, t.gofile)
  }
+ 
+ func (t *test) makeTempDir() {
+ 	var err error
+ 	t.tempDir, err = ioutil.TempDir("", "")
+ 	check(err)
diff --git a/go/patch/go3.patch b/go/patch/go3.patch
index 37bd562..62247a0 100644
--- a/go/patch/go3.patch
+++ b/go/patch/go3.patch
@@ -2,14 +2,15 @@
 
 --- test/fixedbugs/bug248.go
 +++ test/fixedbugs/bug248.go
-@@ -1,5 +1,5 @@
+@@ -1,38 +1,57 @@
  // +build !nacl,!plan9,!windows
 -// run
 +// runtarget
  
  // Copyright 2009 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -8,13 +8,32 @@
+ // license that can be found in the LICENSE file.
+ 
  package main
  
  import (
@@ -42,7 +43,7 @@
  	// TODO: If we get rid of errchk, re-enable this test on Windows.
  	errchk, err := filepath.Abs("errchk")
  	check(err)
-@@ -22,12 +41,12 @@ func main() {
+ 
  	err = os.Chdir(filepath.Join("fixedbugs", "bug248.dir"))
  	check(err)
  
@@ -61,16 +62,22 @@
  
  	os.Remove("bug0.o")
  	os.Remove("bug1.o")
+ 	os.Remove("bug2.o")
+ 	os.Remove("a.out")
+ }
+ 
+ func run(name string, args ...string) {
 --- test/fixedbugs/bug302.go
 +++ test/fixedbugs/bug302.go
-@@ -1,5 +1,5 @@
+@@ -1,28 +1,39 @@
  // +build !nacl
 -// run
 +// runtarget
  
- // Copyright 2010 The Go Authors.  All rights reserved.
+ // Copyright 2010 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -8,16 +8,27 @@
+ // license that can be found in the LICENSE file.
+ 
  package main
  
  import (
@@ -101,16 +108,22 @@
  	os.Remove("p.o")
  	os.Remove("pp.a")
  	os.Remove("main.o")
+ }
+ 
+ func run(cmd string, args ...string) {
+ 	out, err := exec.Command(cmd, args...).CombinedOutput()
+ 	if err != nil {
 --- test/fixedbugs/bug345.go
 +++ test/fixedbugs/bug345.go
-@@ -1,5 +1,5 @@
+@@ -1,34 +1,45 @@
  // +build !nacl,!plan9,!windows
 -// run
 +// runtarget
  
- // Copyright 2011 The Go Authors.  All rights reserved.
+ // Copyright 2011 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -8,13 +8,24 @@
+ // license that can be found in the LICENSE file.
+ 
  package main
  
  import (
@@ -135,7 +148,7 @@
  	// TODO: If we get rid of errchk, re-enable this test on Plan 9 and Windows.
  	errchk, err := filepath.Abs("errchk")
  	check(err)
-@@ -22,8 +33,8 @@ func main() {
+ 
  	err = os.Chdir(filepath.Join(".", "fixedbugs", "bug345.dir"))
  	check(err)
  
@@ -146,16 +159,24 @@
  	os.Remove("io.o")
  }
  
+ func run(name string, args ...string) {
+ 	cmd := exec.Command(name, args...)
+ 	out, err := cmd.CombinedOutput()
+ 	if err != nil {
+ 		fmt.Println(string(out))
 --- test/fixedbugs/bug369.go
 +++ test/fixedbugs/bug369.go
-@@ -1,5 +1,5 @@
+@@ -1,35 +1,54 @@
  // +build !nacl,!windows
 -// run
 +// runtarget
  
- // Copyright 2011 The Go Authors.  All rights reserved.
+ // Copyright 2011 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -10,21 +10,40 @@
+ // license that can be found in the LICENSE file.
+ 
+ // Test that compiling with optimization turned on produces faster code.
+ 
  package main
  
  import (
@@ -201,16 +222,24 @@
  
  	os.Remove("slow.o")
  	os.Remove("fast.o")
+ 	os.Remove("main.o")
+ 	os.Remove("a.exe")
+ }
+ 
+ func run(name string, args ...string) {
 --- test/fixedbugs/bug429_run.go
 +++ test/fixedbugs/bug429_run.go
-@@ -1,5 +1,5 @@
+@@ -1,29 +1,49 @@
  // +build !nacl
 -// run
 +// runtarget
  
  // Copyright 2014 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -10,6 +10,7 @@
+ // license that can be found in the LICENSE file.
+ 
+ // Run the bug429.go test.
+ 
  package main
  
  import (
@@ -218,7 +247,7 @@
  	"fmt"
  	"os"
  	"os/exec"
-@@ -17,8 +18,27 @@ import (
+ 	"path/filepath"
  	"strings"
  )
  
@@ -247,16 +276,25 @@
  	out, err := cmd.CombinedOutput()
  	if err == nil {
  		fmt.Println("expected deadlock")
+ 		os.Exit(1)
+ 	}
+ 
+ 	want := "fatal error: all goroutines are asleep - deadlock!"
+ 	got := string(out)
 --- test/fixedbugs/issue10607.go
 +++ test/fixedbugs/issue10607.go
-@@ -1,5 +1,5 @@
- // +build linux,!ppc64,!ppc64le,!mips64,!mips64le android
+@@ -1,31 +1,51 @@
+ // +build linux,!ppc64 android
 -// run
 +// runtarget
  
  // Copyright 2015 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -11,19 +11,39 @@
+ // license that can be found in the LICENSE file.
+ 
+ // Test that a -B option is passed through when using both internal
+ // and external linking mode.
+ 
  package main
  
  import (
@@ -286,8 +324,9 @@
 +}
 +
  func main() {
+-	test("internal")
 +	flag.Parse()
- 	test("internal")
++	// test("internal")
  	test("external")
  }
  
@@ -297,16 +336,22 @@
  	if err != nil {
  		fmt.Printf("BUG: linkmode=%s %v\n%s\n", linkmode, err, out)
  		os.Exit(1)
+ 	}
+ }
 --- test/fixedbugs/issue11771.go
 +++ test/fixedbugs/issue11771.go
-@@ -1,5 +1,5 @@
+@@ -1,31 +1,42 @@
  // +build !nacl
 -// run
 +// runtarget
  
- // Copyright 2015 The Go Authors.  All rights reserved.
+ // Copyright 2015 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -11,6 +11,7 @@ package main
+ // license that can be found in the LICENSE file.
+ 
+ // Issue 11771: Magic comments should ignore carriage returns.
+ 
+ package main
  
  import (
  	"bytes"
@@ -314,7 +359,9 @@
  	"fmt"
  	"io/ioutil"
  	"log"
-@@ -20,7 +21,17 @@ import (
+ 	"os"
+ 	"os/exec"
+ 	"path/filepath"
  	"runtime"
  )
  
@@ -332,7 +379,17 @@
  	if runtime.Compiler != "gc" {
  		return
  	}
-@@ -52,7 +63,7 @@ func x() {
+ 
+ 	dir, err := ioutil.TempDir("", "go-issue11771")
+ 	if err != nil {
+ 		log.Fatalf("creating temp dir: %v\n", err)
+ 	}
+@@ -47,17 +58,17 @@ func main() {
+ func x() {
+ }
+ `)
+ 
+ 	if err := ioutil.WriteFile(filepath.Join(dir, "x.go"), buf.Bytes(), 0666); err != nil {
  		log.Fatal(err)
  	}
  
@@ -341,15 +398,21 @@
  	cmd.Dir = dir
  	output, err := cmd.CombinedOutput()
  	if err == nil {
+ 		log.Fatal("compile succeeded unexpectedly")
+ 	}
+ 	if !bytes.Contains(output, []byte("only allowed in runtime")) {
+ 		log.Fatalf("wrong error message from compiler; got:\n%s\n", output)
+ 	}
 --- test/fixedbugs/issue9355.go
 +++ test/fixedbugs/issue9355.go
-@@ -1,4 +1,4 @@
+@@ -1,34 +1,45 @@
 -// run
 +// runtarget
  
- // Copyright 2014 The Go Authors.  All rights reserved.
+ // Copyright 2014 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -7,6 +7,7 @@
+ // license that can be found in the LICENSE file.
+ 
  package main
  
  import (
@@ -357,7 +420,8 @@
  	"fmt"
  	"os"
  	"os/exec"
-@@ -15,7 +16,17 @@ import (
+ 	"path/filepath"
+ 	"regexp"
  	"runtime"
  )
  
@@ -375,7 +439,7 @@
  	if runtime.Compiler != "gc" || runtime.GOOS == "nacl" {
  		return
  	}
-@@ -23,7 +34,7 @@ func main() {
+ 
  	err := os.Chdir(filepath.Join("fixedbugs", "issue9355.dir"))
  	check(err)
  
@@ -384,16 +448,24 @@
  	os.Remove("a.o")
  
  	// 6g/8g print the offset as dec, but 5g/9g print the offset as hex.
+ 	patterns := []string{
+ 		`rel 0\+\d t=1 \"\"\.x\+8\r?\n`,       // y = &x.b
+ 		`rel 0\+\d t=1 \"\"\.x\+(28|1c)\r?\n`, // z = &x.d.q
+ 		`rel 0\+\d t=1 \"\"\.b\+5\r?\n`,       // c = &b[5]
+ 		`rel 0\+\d t=1 \"\"\.x\+(88|58)\r?\n`, // w = &x.f[3].r
 --- test/fixedbugs/issue9862_run.go
 +++ test/fixedbugs/issue9862_run.go
-@@ -1,5 +1,5 @@
+@@ -1,26 +1,46 @@
  // +build !nacl
 -// run
 +// runtarget
  
  // Copyright 2015 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -10,12 +10,32 @@
+ // license that can be found in the LICENSE file.
+ 
+ // Check for compile or link error.
+ 
  package main
  
  import (
@@ -427,16 +499,24 @@
  	outstr := string(out)
  	if err == nil {
  		println("go run issue9862.go succeeded, should have failed\n", outstr)
+ 		return
+ 	}
+ 	if !strings.Contains(outstr, "symbol too large") {
+ 		println("go run issue9862.go gave unexpected error; want symbol too large:\n", outstr)
+ 	}
 --- test/linkmain_run.go
 +++ test/linkmain_run.go
-@@ -1,5 +1,5 @@
+@@ -1,26 +1,36 @@
  // +build !nacl
 -// run
 +// runtarget
  
  // Copyright 2014 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -10,12 +10,22 @@
+ // license that can be found in the LICENSE file.
+ 
+ // Run the sinit test.
+ 
  package main
  
  import (
@@ -459,7 +539,17 @@
  func cleanup() {
  	os.Remove("linkmain.o")
  	os.Remove("linkmain.a")
-@@ -51,16 +61,18 @@ func runFail(cmdline string) {
+ 	os.Remove("linkmain1.o")
+ 	os.Remove("linkmain1.a")
+ 	os.Remove("linkmain.exe")
+ }
+ 
+@@ -46,21 +56,23 @@ func runFail(cmdline string) {
+ 		fmt.Println(string(out))
+ 		fmt.Println("SHOULD HAVE FAILED!")
+ 		cleanup()
+ 		os.Exit(1)
+ 	}
  }
  
  func main() {
@@ -486,16 +576,114 @@
 +	runFail(goCmd() + " tool link -o linkmain.exe linkmain1.a")
  	cleanup()
  }
+--- test/linkobj.go
++++ test/linkobj.go
+@@ -1,31 +1,50 @@
+ // +build !nacl
+-// run
++// runtarget
+ 
+ // Copyright 2016 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Test the compiler -linkobj flag.
+ 
+ package main
+ 
+ import (
++	"flag"
+ 	"fmt"
+ 	"io/ioutil"
+ 	"log"
+ 	"os"
+ 	"os/exec"
+ 	"strings"
+ )
+ 
++var target = flag.String("target", "", "if non empty, use 'go_target' to compile test files and 'go_target_exec' to run the binaries")
++
++func goCmd() string {
++	if *target != "" {
++		return "go_" + *target
++	}
++	return "go"
++}
++
++func goRun(cmd ...string) string {
++	if *target == "" {
++		return run(cmd...)
++	} else {
++		return run(append([]string{"go_"+*target+"_exec"}, cmd...)...)
++	}
++}
++
+ var pwd, tmpdir string
+ 
+ func main() {
++	flag.Parse()
+ 	dir, err := ioutil.TempDir("", "go-test-linkobj-")
+ 	if err != nil {
+ 		log.Fatal(err)
+ 	}
+ 	pwd, err = os.Getwd()
+ 	if err != nil {
+ 		log.Fatal(err)
+ 	}
+@@ -71,33 +90,33 @@ func main() {
+ 
+ 		// The compiler expects the files being read to have the right suffix.
+ 		o := "o"
+ 		if round == 1 {
+ 			o = "a"
+ 		}
+ 
+ 		// inlining is disabled to make sure that the link objects contain needed code.
+-		run("go", "tool", "compile", pkg, "-D", ".", "-I", ".", "-l", "-o", "p1."+o, "-linkobj", "p1.lo", "p1.go")
+-		run("go", "tool", "compile", pkg, "-D", ".", "-I", ".", "-l", "-o", "p2."+o, "-linkobj", "p2.lo", "p2.go")
+-		run("go", "tool", "compile", pkg, "-D", ".", "-I", ".", "-l", "-o", "p3."+o, "-linkobj", "p3.lo", "p3.go")
++		run(goCmd(), "tool", "compile", pkg, "-D", ".", "-I", ".", "-l", "-o", "p1."+o, "-linkobj", "p1.lo", "p1.go")
++		run(goCmd(), "tool", "compile", pkg, "-D", ".", "-I", ".", "-l", "-o", "p2."+o, "-linkobj", "p2.lo", "p2.go")
++		run(goCmd(), "tool", "compile", pkg, "-D", ".", "-I", ".", "-l", "-o", "p3."+o, "-linkobj", "p3.lo", "p3.go")
+ 
+ 		cp("p1."+o, "p1.oo")
+ 		cp("p2."+o, "p2.oo")
+ 		cp("p3."+o, "p3.oo")
+ 		cp("p1.lo", "p1."+o)
+ 		cp("p2.lo", "p2."+o)
+ 		cp("p3.lo", "p3."+o)
+-		out := runFail("go", "tool", "link", "p2."+o)
++		out := runFail(goCmd(), "tool", "link", "p2."+o)
+ 		if !strings.Contains(out, "not package main") {
+ 			fatalf("link p2.o failed but not for package main:\n%s", out)
+ 		}
+ 
+-		run("go", "tool", "link", "-L", ".", "-o", "a.out.exe", "p3."+o)
+-		out = run("./a.out.exe")
++		run(goCmd(), "tool", "link", "-L", ".", "-o", "a.out.exe", "p3."+o)
++		out = goRun("./a.out.exe")
+ 		if !strings.Contains(out, "hello from p1\nhello from p2\nhello from main\n") {
+ 			fatalf("running main, incorrect output:\n%s", out)
+ 		}
+ 
+ 		// ensure that mistaken future round can't use these
+ 		os.Remove("p1.o")
+ 		os.Remove("a.out.exe")
+ 	}
 --- test/linkx_run.go
 +++ test/linkx_run.go
-@@ -1,5 +1,5 @@
+@@ -1,35 +1,55 @@
  // +build !nacl
 -// run
 +// runtarget
  
  // Copyright 2014 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -11,20 +11,40 @@ package main
+ // license that can be found in the LICENSE file.
+ 
+ // Run the linkx test.
+ 
+ package main
  
  import (
  	"bytes"
@@ -526,7 +714,7 @@
 +
  func main() {
 +	flag.Parse()
- 	test(" ") // old deprecated syntax
+ 	// test(" ") // old deprecated & removed syntax
  	test("=") // new syntax
  }
  
@@ -537,7 +725,17 @@
  	var out, errbuf bytes.Buffer
  	cmd.Stdout = &out
  	cmd.Stderr = &errbuf
-@@ -44,7 +64,7 @@ func test(sep string) {
+ 	err := cmd.Run()
+ 	if err != nil {
+ 		fmt.Println(errbuf.String())
+ 		fmt.Println(out.String())
+ 		fmt.Println(err)
+@@ -39,25 +59,25 @@ func test(sep string) {
+ 	want := "hello\ntrumped\n"
+ 	got := out.String()
+ 	if got != want {
+ 		fmt.Printf("got %q want %q\n", got, want)
+ 		os.Exit(1)
  	}
  
  	// Issue 8810
@@ -546,7 +744,7 @@
  	_, err = cmd.CombinedOutput()
  	if err == nil {
  		fmt.Println("-X linker flag should not accept keys without values")
-@@ -52,7 +72,7 @@ func test(sep string) {
+ 		os.Exit(1)
  	}
  
  	// Issue 9621
@@ -555,16 +753,23 @@
  	outx, err := cmd.CombinedOutput()
  	if err == nil {
  		fmt.Println("-X linker flag should not overwrite non-strings")
+ 		os.Exit(1)
+ 	}
+ 	outstr := string(outx)
+ 	if !strings.Contains(outstr, "main.b") {
+ 		fmt.Printf("-X linker flag did not diagnose overwrite of main.b:\n%s\n", outstr)
 --- test/nosplit.go
 +++ test/nosplit.go
-@@ -1,5 +1,5 @@
+@@ -1,31 +1,49 @@
  // +build !nacl
 -// run
 +// runtarget
  
- // Copyright 2014 The Go Authors.  All rights reserved.
+ // Copyright 2014 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -9,6 +9,7 @@ package main
+ // license that can be found in the LICENSE file.
+ 
+ package main
  
  import (
  	"bytes"
@@ -572,7 +777,7 @@
  	"fmt"
  	"io/ioutil"
  	"log"
-@@ -16,11 +17,28 @@ import (
+ 	"os"
  	"os/exec"
  	"path/filepath"
  	"regexp"
@@ -602,7 +807,17 @@
  var tests = `
  # These are test cases for the linker analysis that detects chains of
  # nosplit functions that would cause a stack overflow.
-@@ -193,12 +211,13 @@ var (
+ #
+ # Lines beginning with # are comments.
+ #
+ # Each test case describes a sequence of functions, one per line.
+ # Each function definition is the function name, then the frame size,
+@@ -189,22 +207,23 @@ var (
+ 	commentRE = regexp.MustCompile(`(?m)^#.*`)
+ 	rejectRE  = regexp.MustCompile(`(?s)\A(.+?)((\n|; *)REJECT(.*))?\z`)
+ 	lineRE    = regexp.MustCompile(`(\w+) (\d+)( nosplit)?(.*)`)
+ 	callRE    = regexp.MustCompile(`\bcall (\w+)\b`)
+ 	callindRE = regexp.MustCompile(`\bcallind\b`)
  )
  
  func main() {
@@ -619,7 +834,17 @@
  	if err != nil {
  		bug()
  		fmt.Printf("running go tool compile -V: %v\n", err)
-@@ -338,7 +357,7 @@ TestCases:
+ 		return
+ 	}
+ 	if s := string(version); goarch == "amd64" && strings.Contains(s, "X:") && !strings.Contains(s, "framepointer") {
+ 		// Skip this test if framepointer is NOT enabled on AMD64
+ 		return
+@@ -340,17 +359,17 @@ TestCases:
+ 
+ 		if err := ioutil.WriteFile(filepath.Join(dir, "asm.s"), buf.Bytes(), 0666); err != nil {
+ 			log.Fatal(err)
+ 		}
+ 		if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), gobuf.Bytes(), 0666); err != nil {
  			log.Fatal(err)
  		}
  
@@ -628,9 +853,19 @@
  		cmd.Dir = dir
  		output, err := cmd.CombinedOutput()
  		if err == nil {
+ 			nok++
+ 			if reject {
+ 				bug()
+ 				fmt.Printf("accepted incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
+ 			}
 --- test/run.go
 +++ test/run.go
-@@ -220,6 +220,16 @@ func goRun(runcmd runCmd, goname string, args ...string) (out []byte, err error)
+@@ -222,16 +222,26 @@ func goRun(runcmd runCmd, flags []string, goname string, args ...string) (out []
+ 		cmd = append(cmd, findExecCmd()...)
+ 	}
+ 	cmd = append(cmd, flags...)
+ 	cmd = append(cmd, goname)
+ 	cmd = append(cmd, args...)
  	return runcmd(cmd...)
  }
  
@@ -647,16 +882,36 @@
  // skipError describes why a test was skipped.
  type skipError string
  
-@@ -469,7 +479,7 @@ func (t *test) run() {
+ func (s skipError) Error() string { return string(s) }
+ 
+ func check(err error) {
+ 	if err != nil {
+ 		log.Fatal(err)
+@@ -484,17 +494,17 @@ func (t *test) run() {
+ 	}
+ 
+ 	// TODO: Clean up/simplify this switch statement.
+ 	switch action {
+ 	case "rundircmpout":
+ 		action = "rundir"
  	case "cmpout":
  		action = "run" // the run case already looks for <dir>/<test>.out files
- 		fallthrough
--	case "compile", "compiledir", "build", "run", "runoutput", "rundir":
-+	case "compile", "compiledir", "build", "run", "runtarget", "runoutput", "rundir":
- 		t.action = action
+-	case "compile", "compiledir", "build", "builddir", "run", "buildrun", "runoutput", "rundir":
++	case "compile", "compiledir", "build", "builddir", "run", "runtarget", "buildrun", "runoutput", "rundir":
+ 		// nothing to do
+ 	case "errorcheckandrundir":
+ 		wantError = false // should be no error if also will run
+ 	case "errorcheckwithauto":
+ 		action = "errorcheck"
+ 		wantAuto = true
+ 		wantError = true
  	case "errorcheck", "errorcheckdir", "errorcheckoutput":
- 		t.action = action
-@@ -653,6 +663,17 @@ func (t *test) run() {
+@@ -807,16 +817,27 @@ func (t *test) run() {
+ 		if err != nil {
+ 			t.err = err
+ 			return
+ 		}
+ 		if strings.Replace(string(out), "\r\n", "\n", -1) != t.expectedOutput() {
  			t.err = fmt.Errorf("incorrect output\n%s", out)
  		}
  
@@ -674,16 +929,25 @@
  	case "runoutput":
  		rungatec <- true
  		defer func() {
+ 			<-rungatec
+ 		}()
+ 		useTmp = false
+ 		out, err := goRun(runcmd, nil, t.goFileName(), args...)
+ 		if err != nil {
 --- test/sinit_run.go
 +++ test/sinit_run.go
-@@ -1,5 +1,5 @@
+@@ -1,28 +1,39 @@
  // +build !nacl
 -// run
 +// runtarget
  
  // Copyright 2014 The Go Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style
-@@ -11,13 +11,24 @@ package main
+ // license that can be found in the LICENSE file.
+ 
+ // Run the sinit test.
+ 
+ package main
  
  import (
  	"bytes"
@@ -709,3 +973,8 @@
  	out, err := cmd.CombinedOutput()
  	if err != nil {
  		fmt.Println(string(out))
+ 		fmt.Println(err)
+ 		os.Exit(1)
+ 	}
+ 	os.Remove("sinit.o")
+ 
diff --git a/go/patch/go4.patch b/go/patch/go4.patch
index 1c96c09..290de39 100644
--- a/go/patch/go4.patch
+++ b/go/patch/go4.patch
@@ -2,15 +2,26 @@
 
 --- src/crypto/x509/x509_test.go
 +++ src/crypto/x509/x509_test.go
-@@ -19,6 +19,7 @@ import (
+@@ -13,29 +13,32 @@ import (
+ 	"crypto/rsa"
+ 	_ "crypto/sha256"
+ 	_ "crypto/sha512"
+ 	"crypto/x509/pkix"
+ 	"encoding/asn1"
+ 	"encoding/base64"
  	"encoding/hex"
  	"encoding/pem"
- 	"fmt"
 +	"flag"
+ 	"fmt"
  	"internal/testenv"
  	"math/big"
  	"net"
-@@ -28,6 +29,8 @@ import (
+ 	"net/url"
+ 	"os/exec"
+ 	"reflect"
+ 	"runtime"
+ 	"strings"
+ 	"testing"
  	"time"
  )
  
@@ -19,14 +30,24 @@
  func TestParsePKCS1PrivateKey(t *testing.T) {
  	block, _ := pem.Decode([]byte(pemPrivateKey))
  	priv, err := ParsePKCS1PrivateKey(block.Bytes)
-@@ -862,7 +865,13 @@ func TestParsePEMCRL(t *testing.T) {
+ 	if err != nil {
+ 		t.Errorf("Failed to parse private key: %s", err)
+ 		return
+ 	}
+ 	if priv.PublicKey.N.Cmp(rsaPrivateKey.PublicKey.N) != 0 ||
+@@ -1089,17 +1092,23 @@ func TestParsePEMCRL(t *testing.T) {
+ 	}
+ 
+ 	// Can't check the signature here without a package cycle.
+ }
+ 
  func TestImports(t *testing.T) {
  	testenv.MustHaveGoRun(t)
  
--	if err := exec.Command("go", "run", "x509_test_import.go").Run(); err != nil {
+-	if err := exec.Command(testenv.GoToolPath(t), "run", "x509_test_import.go").Run(); err != nil {
 +	var cmd *exec.Cmd
 +	if *target == "" {
-+		cmd = exec.Command("go", "run", "x509_test_import.go")
++		cmd = exec.Command(testenv.GoToolPath(t), "run", "x509_test_import.go")
 +	} else {
 +		cmd = exec.Command("go_"+*target, "run", "-exec", "go_"+*target+"_exec", "x509_test_import.go")
 +	}
@@ -34,27 +55,50 @@
  		t.Errorf("failed to run x509_test_import.go: %s", err)
  	}
  }
+ 
+ const derCRLBase64 = "MIINqzCCDJMCAQEwDQYJKoZIhvcNAQEFBQAwVjEZMBcGA1UEAxMQUEtJIEZJTk1FQ0NBTklDQTEVMBMGA1UEChMMRklOTUVDQ0FOSUNBMRUwEwYDVQQLEwxGSU5NRUNDQU5JQ0ExCzAJBgNVBAYTAklUFw0xMTA1MDQxNjU3NDJaFw0xMTA1MDQyMDU3NDJaMIIMBzAhAg4Ze1od49Lt1qIXBydAzhcNMDkwNzE2MDg0MzIyWjAAMCECDl0HSL9bcZ1Ci/UHJ0DPFw0wOTA3MTYwODQzMTNaMAAwIQIOESB9tVAmX3cY7QcnQNAXDTA5MDcxNjA4NDUyMlowADAhAg4S1tGAQ3mHt8uVBydA1RcNMDkwODA0MTUyNTIyWjAAMCECDlQ249Y7vtC25ScHJ0DWFw0wOTA4MDQxNTI1MzdaMAAwIQIOISMop3NkA4PfYwcnQNkXDTA5MDgwNDExMDAzNFowADAhAg56/BMoS29KEShTBydA2hcNMDkwODA0MTEwMTAzWjAAMCECDnBp/22HPH5CSWoHJ0DbFw0wOTA4MDQxMDU0NDlaMAAwIQIOV9IP+8CD8bK+XAcnQNwXDTA5MDgwNDEwNTcxN1owADAhAg4v5aRz0IxWqYiXBydA3RcNMDkwODA0MTA1NzQ1WjAAMCECDlOU34VzvZAybQwHJ0DeFw0wOTA4MDQxMDU4MjFaMAAwIAINO4CD9lluIxcwBydBAxcNMDkwNzIyMTUzMTU5WjAAMCECDgOllfO8Y1QA7/wHJ0ExFw0wOTA3MjQxMTQxNDNaMAAwIQIOJBX7jbiCdRdyjgcnQUQXDTA5MDkxNjA5MzAwOFowADAhAg5iYSAgmDrlH/RZBydBRRcNMDkwOTE2MDkzMDE3WjAAMCECDmu6k6srP3jcMaQHJ0FRFw0wOTA4MDQxMDU2NDBaMAAwIQIOX8aHlO0V+WVH4QcnQVMXDTA5MDgwNDEwNTcyOVowADAhAg5flK2rg3NnsRgDBydBzhcNMTEwMjAxMTUzMzQ2WjAAMCECDg35yJDL1jOPTgoHJ0HPFw0xMTAyMDExNTM0MjZaMAAwIQIOMyFJ6+e9iiGVBQcnQdAXDTA5MDkxODEzMjAwNVowADAhAg5Emb/Oykucmn8fBydB1xcNMDkwOTIxMTAxMDQ3WjAAMCECDjQKCncV+MnUavMHJ0HaFw0wOTA5MjIwODE1MjZaMAAwIQIOaxiFUt3dpd+tPwcnQfQXDTEwMDYxODA4NDI1MVowADAhAg5G7P8nO0tkrMt7BydB9RcNMTAwNjE4MDg0MjMwWjAAMCECDmTCC3SXhmDRst4HJ0H2Fw0wOTA5MjgxMjA3MjBaMAAwIQIOHoGhUr/pRwzTKgcnQfcXDTA5MDkyODEyMDcyNFowADAhAg50wrcrCiw8mQmPBydCBBcNMTAwMjE2MTMwMTA2WjAAMCECDifWmkvwyhEqwEcHJ0IFFw0xMDAyMTYxMzAxMjBaMAAwIQIOfgPmlW9fg+osNgcnQhwXDTEwMDQxMzA5NTIwMFowADAhAg4YHAGuA6LgCk7tBydCHRcNMTAwNDEzMDk1MTM4WjAAMCECDi1zH1bxkNJhokAHJ0IsFw0xMDA0MTMwOTU5MzBaMAAwIQIOMipNccsb/wo2fwcnQi0XDTEwMDQxMzA5NTkwMFowADAhAg46lCmvPl4GpP6ABydCShcNMTAwMTE5MDk1MjE3WjAAMCECDjaTcaj+wBpcGAsHJ0JLFw0xMDAxMTkwOTUyMzRaMAAwIQIOOMC13EOrBuxIOQcnQloXDTEwMDIwMTA5NDcwNVowADAhAg5KmZl+krz4RsmrBydCWxcNMTAwMjAxMDk0NjQwWjAAMCECDmLG3zQJ/fzdSsUHJ0JiFw0xMDAzMDEwOTUxNDBaMAAwIQIOP39ksgHdojf4owcnQmMXDTEwMDMwMTA5NTExN1owADAhAg4L
DQzvWNRlD6v9BydCZBcNMTAwMzAxMDk0NjIyWjAAMCECDkmNfeclaFhIaaUHJ0JlFw0xMDAzMDEwOTQ2MDVaMAAwIQIOT/qWWfpH/m8NTwcnQpQXDTEwMDUxMTA5MTgyMVowADAhAg5m/ksYxvCEgJSvBydClRcNMTAwNTExMDkxODAxWjAAMCECDgvf3Ohq6JOPU9AHJ0KWFw0xMDA1MTEwOTIxMjNaMAAwIQIOKSPas10z4jNVIQcnQpcXDTEwMDUxMTA5MjEwMlowADAhAg4mCWmhoZ3lyKCDBydCohcNMTEwNDI4MTEwMjI1WjAAMCECDkeiyRsBMK0Gvr4HJ0KjFw0xMTA0MjgxMTAyMDdaMAAwIQIOa09b/nH2+55SSwcnQq4XDTExMDQwMTA4Mjk0NlowADAhAg5O7M7iq7gGplr1BydCrxcNMTEwNDAxMDgzMDE3WjAAMCECDjlT6mJxUjTvyogHJ0K1Fw0xMTAxMjcxNTQ4NTJaMAAwIQIODS/l4UUFLe21NAcnQrYXDTExMDEyNzE1NDgyOFowADAhAg5lPRA0XdOUF6lSBydDHhcNMTEwMTI4MTQzNTA1WjAAMCECDixKX4fFGGpENwgHJ0MfFw0xMTAxMjgxNDM1MzBaMAAwIQIORNBkqsPnpKTtbAcnQ08XDTEwMDkwOTA4NDg0MlowADAhAg5QL+EMM3lohedEBydDUBcNMTAwOTA5MDg0ODE5WjAAMCECDlhDnHK+HiTRAXcHJ0NUFw0xMDEwMTkxNjIxNDBaMAAwIQIOdBFqAzq/INz53gcnQ1UXDTEwMTAxOTE2MjA0NFowADAhAg4OjR7s8MgKles1BydDWhcNMTEwMTI3MTY1MzM2WjAAMCECDmfR/elHee+d0SoHJ0NbFw0xMTAxMjcxNjUzNTZaMAAwIQIOBTKv2ui+KFMI+wcnQ5YXDTEwMDkxNTEwMjE1N1owADAhAg49F3c/GSah+oRUBydDmxcNMTEwMTI3MTczMjMzWjAAMCECDggv4I61WwpKFMMHJ0OcFw0xMTAxMjcxNzMyNTVaMAAwIQIOXx/Y8sEvwS10LAcnQ6UXDTExMDEyODExMjkzN1owADAhAg5LSLbnVrSKaw/9BydDphcNMTEwMTI4MTEyOTIwWjAAMCECDmFFoCuhKUeACQQHJ0PfFw0xMTAxMTExMDE3MzdaMAAwIQIOQTDdFh2fSPF6AAcnQ+AXDTExMDExMTEwMTcxMFowADAhAg5B8AOXX61FpvbbBydD5RcNMTAxMDA2MTAxNDM2WjAAMCECDh41P2Gmi7PkwI4HJ0PmFw0xMDEwMDYxMDE2MjVaMAAwIQIOWUHGLQCd+Ale9gcnQ/0XDTExMDUwMjA3NTYxMFowADAhAg5Z2c9AYkikmgWOBydD/hcNMTEwNTAyMDc1NjM0WjAAMCECDmf/UD+/h8nf+74HJ0QVFw0xMTA0MTUwNzI4MzNaMAAwIQIOICvj4epy3MrqfwcnRBYXDTExMDQxNTA3Mjg1NlowADAhAg4bouRMfOYqgv4xBydEHxcNMTEwMzA4MTYyNDI1WjAAMCECDhebWHGoKiTp7pEHJ0QgFw0xMTAzMDgxNjI0NDhaMAAwIQIOX+qnxxAqJ8LtawcnRDcXDTExMDEzMTE1MTIyOFowADAhAg4j0fICqZ+wkOdqBydEOBcNMTEwMTMxMTUxMTQxWjAAMCECDhmXjsV4SUpWtAMHJ0RLFw0xMTAxMjgxMTI0MTJaMAAwIQIODno/w+zG43kkTwcnREwXDTExMDEyODExMjM1MlowADAhAg4b1gc88767Fr+LBydETxcNMTEwMTI4MTEwMjA4WjAAMCECDn+M3Pa1w2nyFeUHJ0RQFw0xMTAxMjgxMDU4NDVaMAAwIQIOaduoyIH61tqybAcnRJUXDTEwMTIxNTA5NDMyMlowADAhAg4nLqQPkyi3ESAKBydElhcNMTAxMjE1MDk0MzM2WjAA
MCECDi504NIMH8578gQHJ0SbFw0xMTAyMTQxNDA1NDFaMAAwIQIOGuaM8PDaC5u1egcnRJwXDTExMDIxNDE0MDYwNFowADAhAg4ehYq/BXGnB5PWBydEnxcNMTEwMjA0MDgwOTUxWjAAMCECDkSD4eS4FxW5H20HJ0SgFw0xMTAyMDQwODA5MjVaMAAwIQIOOCcb6ilYObt1egcnRKEXDTExMDEyNjEwNDEyOVowADAhAg58tISWCCwFnKGnBydEohcNMTEwMjA0MDgxMzQyWjAAMCECDn5rjtabY/L/WL0HJ0TJFw0xMTAyMDQxMTAzNDFaMAAwDQYJKoZIhvcNAQEFBQADggEBAGnF2Gs0+LNiYCW1Ipm83OXQYP/bd5tFFRzyz3iepFqNfYs4D68/QihjFoRHQoXEB0OEe1tvaVnnPGnEOpi6krwekquMxo4H88B5SlyiFIqemCOIss0SxlCFs69LmfRYvPPvPEhoXtQ3ZThe0UvKG83GOklhvGl6OaiRf4Mt+m8zOT4Wox/j6aOBK6cw6qKCdmD+Yj1rrNqFGg1CnSWMoD6S6mwNgkzwdBUJZ22BwrzAAo4RHa2Uy3ef1FjwD0XtU5N3uDSxGGBEDvOe5z82rps3E22FpAA8eYl8kaXtmWqyvYU0epp4brGuTxCuBMCAsxt/OjIjeNNQbBGkwxgfYA0="
+ 
+ const pemCRLBase64 = "LS0tLS1CRUdJTiBYNTA5IENSTC0tLS0tDQpNSUlCOWpDQ0FWOENBUUV3RFFZSktvWklodmNOQVFFRkJRQXdiREVhTUJnR0ExVUVDaE1SVWxOQklGTmxZM1Z5DQphWFI1SUVsdVl5NHhIakFjQmdOVkJBTVRGVkpUUVNCUWRXSnNhV01nVW05dmRDQkRRU0IyTVRFdU1Dd0dDU3FHDQpTSWIzRFFFSkFSWWZjbk5oYTJWdmJuSnZiM1J6YVdkdVFISnpZWE5sWTNWeWFYUjVMbU52YlJjTk1URXdNakl6DQpNVGt5T0RNd1doY05NVEV3T0RJeU1Ua3lPRE13V2pDQmpEQktBaEVBckRxb2g5RkhKSFhUN09QZ3V1bjQrQmNODQpNRGt4TVRBeU1UUXlOekE1V2pBbU1Bb0dBMVVkRlFRRENnRUpNQmdHQTFVZEdBUVJHQTh5TURBNU1URXdNakUwDQpNalExTlZvd1BnSVJBTEd6blowOTVQQjVhQU9MUGc1N2ZNTVhEVEF5TVRBeU16RTBOVEF4TkZvd0dqQVlCZ05WDQpIUmdFRVJnUE1qQXdNakV3TWpNeE5EVXdNVFJhb0RBd0xqQWZCZ05WSFNNRUdEQVdnQlQxVERGNlVRTS9MTmVMDQpsNWx2cUhHUXEzZzltekFMQmdOVkhSUUVCQUlDQUlRd0RRWUpLb1pJaHZjTkFRRUZCUUFEZ1lFQUZVNUFzNk16DQpxNVBSc2lmYW9iUVBHaDFhSkx5QytNczVBZ2MwYld5QTNHQWR4dXI1U3BQWmVSV0NCamlQL01FSEJXSkNsQkhQDQpHUmNxNXlJZDNFakRrYUV5eFJhK2k2N0x6dmhJNmMyOUVlNks5cFNZd2ppLzdSVWhtbW5Qclh0VHhsTDBsckxyDQptUVFKNnhoRFJhNUczUUE0Q21VZHNITnZicnpnbUNZcHZWRT0NCi0tLS0tRU5EIFg1MDkgQ1JMLS0tLS0NCg0K"
+ 
+--- src/runtime/crash_cgo_test.go
++++ src/runtime/crash_cgo_test.go
+@@ -279,17 +279,17 @@ func testCgoPprof(t *testing.T, buildArg, runArg string) {
+ 	}
+ 	testenv.MustHaveGoRun(t)
+ 
+ 	exe, err := buildTestProg(t, "testprogcgo", buildArg)
+ 	if err != nil {
+ 		t.Fatal(err)
+ 	}
+ 
+-	got, err := testenv.CleanCmdEnv(exec.Command(exe, runArg)).CombinedOutput()
++	got, err := testenv.CleanCmdEnv(goExecCmd(exe, runArg)).CombinedOutput()
+ 	if err != nil {
+ 		if testenv.Builder() == "linux-amd64-alpine" {
+ 			// See Issue 18243 and Issue 19938.
+ 			t.Skipf("Skipping failing test on Alpine (golang.org/issue/18243). Ignoring error: %v", err)
+ 		}
+ 		t.Fatal(err)
+ 	}
+ 	fn := strings.TrimSpace(string(got))
 --- src/runtime/crash_test.go
 +++ src/runtime/crash_test.go
-@@ -5,6 +5,7 @@
- package runtime_test
- 
- import (
-+	"flag"
- 	"fmt"
- 	"internal/testenv"
- 	"io/ioutil"
-@@ -18,6 +19,25 @@ import (
+@@ -17,16 +17,35 @@ import (
+ 	"runtime"
+ 	"strconv"
+ 	"strings"
+ 	"sync"
  	"testing"
+ 	"time"
  )
  
 +var target = flag.String("target", "", "if non empty, use 'go_target' to compile test files and 'go_target_exec' to run the binaries")
 +
-+func goCmd() string {
++func goCmd(t *testing.T) string {
 +	if *target != "" {
 +		return "go_" + *target
 +	}
-+	return "go"
++	return testenv.GoToolPath(t)
 +}
 +
 +func goExecCmd(name string, arg ...string) *exec.Cmd {
@@ -70,32 +114,86 @@
  var toRemove []string
  
  func TestMain(m *testing.M) {
-@@ -65,7 +85,7 @@ func runTestProg(t *testing.T, binary, name string) string {
+ 	status := m.Run()
+ 	for _, file := range toRemove {
+ 		os.RemoveAll(file)
+ 	}
+ 	os.Exit(status)
+@@ -50,17 +69,17 @@ func runTestProg(t *testing.T, binary, name string, env ...string) string {
+ 
+ 	testenv.MustHaveGoBuild(t)
+ 
+ 	exe, err := buildTestProg(t, binary)
  	if err != nil {
  		t.Fatal(err)
  	}
--	got, _ := testEnv(exec.Command(exe, name)).CombinedOutput()
-+	got, _ := testEnv(goExecCmd(exe, name)).CombinedOutput()
- 	return string(got)
- }
  
-@@ -92,7 +112,7 @@ func buildTestProg(t *testing.T, binary string) (string, error) {
+-	cmd := testenv.CleanCmdEnv(exec.Command(exe, name))
++	cmd := testenv.CleanCmdEnv(goExecCmd(exe, name))
+ 	cmd.Env = append(cmd.Env, env...)
+ 	if testing.Short() {
+ 		cmd.Env = append(cmd.Env, "RUNTIME_TEST_SHORT=1")
+ 	}
+ 	var b bytes.Buffer
+ 	cmd.Stdout = &b
+ 	cmd.Stderr = &b
+ 	if err := cmd.Start(); err != nil {
+@@ -125,17 +144,17 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error)
+ 		name += "_" + strings.Join(flags, "_")
+ 	}
+ 	target, ok := testprog.target[name]
+ 	if ok {
+ 		return target.exe, target.err
  	}
  
- 	exe := filepath.Join(testprog.dir, binary+".exe")
--	cmd := exec.Command("go", "build", "-o", exe)
-+	cmd := exec.Command(goCmd(), "build", "-o", exe)
+ 	exe := filepath.Join(testprog.dir, name+".exe")
+-	cmd := exec.Command(testenv.GoToolPath(t), append([]string{"build", "-o", exe}, flags...)...)
++	cmd := exec.Command(goCmd(t), append([]string{"build", "-o", exe}, flags...)...)
  	cmd.Dir = "testdata/" + binary
- 	out, err := testEnv(cmd).CombinedOutput()
+ 	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
  	if err != nil {
+ 		target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
+ 		testprog.target[name] = target
+ 		return "", target.err
+ 	}
+ 	target.exe = exe
+@@ -456,17 +475,17 @@ func TestPanicLoop(t *testing.T) {
+ func TestMemPprof(t *testing.T) {
+ 	testenv.MustHaveGoRun(t)
+ 
+ 	exe, err := buildTestProg(t, "testprog")
+ 	if err != nil {
+ 		t.Fatal(err)
+ 	}
+ 
+-	got, err := testenv.CleanCmdEnv(exec.Command(exe, "MemProf")).CombinedOutput()
++	got, err := testenv.CleanCmdEnv(goExecCmd(exe, "MemProf")).CombinedOutput()
+ 	if err != nil {
+ 		t.Fatal(err)
+ 	}
+ 	fn := strings.TrimSpace(string(got))
+ 	defer os.Remove(fn)
+ 
+ 	for try := 0; try < 2; try++ {
+ 		cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-alloc_space", "-top"))
 --- src/runtime/crash_unix_test.go
 +++ src/runtime/crash_unix_test.go
-@@ -157,7 +157,7 @@ func TestSignalExitStatus(t *testing.T) {
+@@ -244,17 +244,17 @@ func testPanicSystemstackInternal() {
+ }
+ 
+ func TestSignalExitStatus(t *testing.T) {
+ 	testenv.MustHaveGoBuild(t)
+ 	exe, err := buildTestProg(t, "testprog")
  	if err != nil {
  		t.Fatal(err)
  	}
--	err = testEnv(exec.Command(exe, "SignalExitStatus")).Run()
-+	err = testEnv(goExecCmd(exe, "SignalExitStatus")).Run()
+-	err = testenv.CleanCmdEnv(exec.Command(exe, "SignalExitStatus")).Run()
++	err = testenv.CleanCmdEnv(goExecCmd(exe, "SignalExitStatus")).Run()
  	if err == nil {
  		t.Error("test program succeeded unexpectedly")
  	} else if ee, ok := err.(*exec.ExitError); !ok {
+ 		t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
+ 	} else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
+ 		t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
+ 	} else if !ws.Signaled() || ws.Signal() != syscall.SIGTERM {
+ 		t.Errorf("got %v; expected SIGTERM", ee)
diff --git a/go/patch/go5.patch b/go/patch/go5.patch
index fa65658..7189c89 100644
--- a/go/patch/go5.patch
+++ b/go/patch/go5.patch
@@ -1,198 +1,160 @@
-misc/cgo/testcshared: add support for -target.
+runtime: deadlock detection does not work when using external linker.
 
---- misc/cgo/testcshared/test.bash
-+++ misc/cgo/testcshared/test.bash
-@@ -14,9 +14,23 @@ if [ ! -f src/libgo/libgo.go ]; then
- 	exit 1
- fi
- 
--goos=$(go env GOOS)
--goarch=$(go env GOARCH)
--goroot=$(go env GOROOT)
-+function target()
-+	{
-+	[[ -n "${target}" ]]
-+	}
-+
-+function go_target()
-+	{
-+	if target; then
-+		go_${target} "$@"
-+	else
-+		go "$@"
-+	fi
-+	}
-+
-+goos=$(go_target env GOOS)
-+goarch=$(go_target env GOARCH)
-+goroot=$(go_target env GOROOT)
- if [ ! -d "$goroot" ]; then
- 	echo 'misc/cgo/testcshared/test.bash cannnot find GOROOT' 1>&2
- 	echo '$GOROOT:' "$GOROOT" 1>&2
-@@ -31,8 +45,10 @@ if [ "${goos}" == "darwin" ]; then
- 	installdir=pkg/${goos}_${goarch}_testcshared
- fi
- 
--# Temporary directory on the android device.
--androidpath=/data/local/tmp/testcshared-$$
-+# Temporary directory on the android/chromeos device.
-+if target; then
-+	remotepath=$(target_tmpdir)/testcshared-$$
-+fi
- 
- function cleanup() {
- 	rm -f libgo.$libext libgo2.$libext libgo4.$libext libgo5.$libext
-@@ -40,37 +56,33 @@ function cleanup() {
- 	rm -f testp testp2 testp3 testp4 testp5
- 	rm -rf pkg "${goroot}/${installdir}"
- 
--	if [ "$goos" == "android" ]; then
--		adb shell rm -rf "$androidpath"
-+	if target; then
-+		target_sh "${target}" "rm -rf $remotepath"
- 	fi
- }
- trap cleanup EXIT
- 
--if [ "$goos" == "android" ]; then
--	adb shell mkdir -p "$androidpath"
-+if target; then
-+	target_sh "${target}" "mkdir -p $remotepath"
- fi
- 
- function run() {
--	case "$goos" in
--	"android")
-+	if target; then
- 		local args=$@
--		output=$(adb shell "cd ${androidpath}; $@")
--		output=$(echo $output|tr -d '\r')
-+		output=$(target_sh "${target}" "cd ${remotepath}; $@")
- 		case $output in
- 			*PASS) echo "PASS";; 
- 			*) echo "$output";;
- 		esac
--		;;
--	*)
-+	else
- 		echo $(env $@)
--		;;
--	esac
-+	fi
+--- src/runtime/crash_test.go
++++ src/runtime/crash_test.go
+@@ -214,32 +214,37 @@ func testDeadlock(t *testing.T, name string) {
+ 	output := runTestProg(t, "testprog", name)
+ 	want := "fatal error: all goroutines are asleep - deadlock!\n"
+ 	if !strings.HasPrefix(output, want) {
+ 		t.Fatalf("output does not start with %q:\n%s", want, output)
+ 	}
  }
  
- function binpush() {
- 	bin=${1}
--	if [ "$goos" == "android" ]; then
--		adb push "$bin"  "${androidpath}/${bin}" 2>/dev/null
-+	if target; then
-+		target_cp "$bin" "${target}:${remotepath}/${bin}"
- 	fi
+ func TestSimpleDeadlock(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	testDeadlock(t, "SimpleDeadlock")
  }
  
-@@ -84,9 +96,9 @@ if [ "$goos" == "darwin" ]; then
- fi
+ func TestInitDeadlock(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	testDeadlock(t, "InitDeadlock")
+ }
  
- # Create the header files.
--GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo
-+GOPATH=$(pwd) go_target install -buildmode=c-shared $suffix libgo
+ func TestLockedDeadlock(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	testDeadlock(t, "LockedDeadlock")
+ }
  
--GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
-+GOPATH=$(pwd) go_target build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
- binpush libgo.$libext
+ func TestLockedDeadlock2(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	testDeadlock(t, "LockedDeadlock2")
+ }
  
- if [ "$goos" == "linux" ] || [ "$goos" == "android" ] ; then
-@@ -96,8 +108,8 @@ if [ "$goos" == "linux" ] || [ "$goos" == "android" ] ; then
-     fi
- fi
+ func TestGoexitDeadlock(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	output := runTestProg(t, "testprog", "GoexitDeadlock")
+ 	want := "no goroutines (main called runtime.Goexit) - deadlock!"
+ 	if !strings.Contains(output, want) {
+ 		t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ 	}
+ }
  
--GOGCCFLAGS=$(go env GOGCCFLAGS)
--if [ "$goos" == "android" ]; then
-+GOGCCFLAGS=$(go_target env GOGCCFLAGS)
-+if target; then
- 	GOGCCFLAGS="${GOGCCFLAGS} -pie"
- fi
+ func TestStackOverflow(t *testing.T) {
+@@ -266,16 +271,17 @@ panic: again
+ `
+ 	if !strings.HasPrefix(output, want) {
+ 		t.Fatalf("output does not start with %q:\n%s", want, output)
+ 	}
  
-@@ -105,7 +117,7 @@ status=0
+ }
  
- # test0: exported symbols in shared lib are accessible.
- # TODO(iant): using _shared here shouldn't really be necessary.
--$(go env CC) ${GOGCCFLAGS} -I ${installdir} -o testp main0.c libgo.$libext
-+$(go_target env CC) ${GOGCCFLAGS} -I ${installdir} -o testp main0.c libgo.$libext
- binpush testp
+ func TestGoexitCrash(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	output := runTestProg(t, "testprog", "GoexitExit")
+ 	want := "no goroutines (main called runtime.Goexit) - deadlock!"
+ 	if !strings.Contains(output, want) {
+ 		t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ 	}
+ }
  
- output=$(run LD_LIBRARY_PATH=. ./testp)
-@@ -115,7 +127,7 @@ if [ "$output" != "PASS" ]; then
- fi
+ func TestGoexitDefer(t *testing.T) {
+@@ -324,16 +330,17 @@ func TestBreakpoint(t *testing.T) {
+ 	// "runtime.Breakpoint(...)" instead of "runtime.Breakpoint()".
+ 	want := "runtime.Breakpoint("
+ 	if !strings.Contains(output, want) {
+ 		t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ 	}
+ }
  
- # test1: shared library can be dynamically loaded and exported symbols are accessible.
--$(go env CC) ${GOGCCFLAGS} -o testp main1.c -ldl
-+$(go_target env CC) ${GOGCCFLAGS} -o testp main1.c -ldl
- binpush testp
- output=$(run ./testp ./libgo.$libext)
- if [ "$output" != "PASS" ]; then
-@@ -124,13 +136,13 @@ if [ "$output" != "PASS" ]; then
- fi
+ func TestGoexitInPanic(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	// see issue 8774: this code used to trigger an infinite recursion
+ 	output := runTestProg(t, "testprog", "GoexitInPanic")
+ 	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
+ 	if !strings.HasPrefix(output, want) {
+ 		t.Fatalf("output does not start with %q:\n%s", want, output)
+ 	}
+ }
  
- # test2: tests libgo2 which does not export any functions.
--GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo2.$libext libgo2
-+GOPATH=$(pwd) go_target build -buildmode=c-shared $suffix -o libgo2.$libext libgo2
- binpush libgo2.$libext
- linkflags="-Wl,--no-as-needed"
- if [ "$goos" == "darwin" ]; then
- 	linkflags=""
- fi
--$(go env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext
-+$(go_target env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext
- binpush testp2
- output=$(run LD_LIBRARY_PATH=. ./testp2)
- if [ "$output" != "PASS" ]; then
-@@ -138,9 +150,9 @@ if [ "$output" != "PASS" ]; then
- 	status=1
- fi
+@@ -388,16 +395,17 @@ func TestPanicAfterGoexit(t *testing.T) {
+ 	output := runTestProg(t, "testprog", "PanicAfterGoexit")
+ 	want := "panic: hello"
+ 	if !strings.HasPrefix(output, want) {
+ 		t.Fatalf("output does not start with %q:\n%s", want, output)
+ 	}
+ }
  
--# test3: tests main.main is exported on android.
--if [ "$goos" == "android" ]; then
--	$(go env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl
-+# test3: tests main.main is exported on android/chromeos.
-+if target; then
-+	$(go_target env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl
- 	binpush testp3
- 	output=$(run ./testp ./libgo.so)
- 	if [ "$output" != "PASS" ]; then
-@@ -150,14 +162,14 @@ if [ "$goos" == "android" ]; then
- fi
+ func TestRecoveredPanicAfterGoexit(t *testing.T) {
++	t.Skip("deadlock detection fails with external linker")
+ 	output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit")
+ 	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
+ 	if !strings.HasPrefix(output, want) {
+ 		t.Fatalf("output does not start with %q:\n%s", want, output)
+ 	}
+ }
  
- # test4: tests signal handlers
--GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo4.$libext libgo4
-+GOPATH=$(pwd) go_target build -buildmode=c-shared $suffix -o libgo4.$libext libgo4
- binpush libgo4.$libext
--$(go env CC) ${GOGCCFLAGS} -pthread -o testp4 main4.c -ldl
-+$(go_target env CC) ${GOGCCFLAGS} -pthread -o testp4 main4.c -ldl
- binpush testp4
- output=$(run ./testp4 ./libgo4.$libext 2>&1)
- if test "$output" != "PASS"; then
-     echo "FAIL test4 got ${output}"
--    if test "$goos" != "android"; then
-+    if ! target; then
- 	echo "re-running test4 in verbose mode"
- 	./testp4 ./libgo4.$libext verbose
-     fi
-@@ -165,14 +177,14 @@ if test "$output" != "PASS"; then
- fi
+ func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
+--- src/runtime/proc_test.go
++++ src/runtime/proc_test.go
+@@ -349,19 +349,20 @@ func TestGCFairness2(t *testing.T) {
+ 	want := "OK\n"
+ 	if output != want {
+ 		t.Fatalf("want %s, got %s\n", want, output)
+ 	}
+ }
  
- # test5: tests signal handlers with os/signal.Notify
--GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo5.$libext libgo5
-+GOPATH=$(pwd) go_target build -buildmode=c-shared $suffix -o libgo5.$libext libgo5
- binpush libgo5.$libext
--$(go env CC) ${GOGCCFLAGS} -pthread -o testp5 main5.c -ldl
-+$(go_target env CC) ${GOGCCFLAGS} -pthread -o testp5 main5.c -ldl
- binpush testp5
- output=$(run ./testp5 ./libgo5.$libext 2>&1)
- if test "$output" != "PASS"; then
-     echo "FAIL test5 got ${output}"
--    if test "$goos" != "android"; then
-+    if ! target; then
- 	echo "re-running test5 in verbose mode"
- 	./testp5 ./libgo5.$libext verbose
-     fi
+ func TestNumGoroutine(t *testing.T) {
+ 	output := runTestProg(t, "testprog", "NumGoroutine")
+-	want := "1\n"
+-	if output != want {
+-		t.Fatalf("want %q, got %q", want, output)
++	want1 := "1\n"
++	want2 := "2\n"
++	if output != want1 && output != want2 {
++		t.Fatalf("want %q, got %q", want1, output)
+ 	}
+ 
+ 	buf := make([]byte, 1<<20)
+ 
+ 	// Try up to 10 times for a match before giving up.
+ 	// This is a fundamentally racy check but it's important
+ 	// to notice if NumGoroutine and Stack are _always_ out of sync.
+ 	for i := 0; ; i++ {
+--- test/fixedbugs/bug429_run.go
++++ test/fixedbugs/bug429_run.go
+@@ -1,10 +1,10 @@
+ // +build !nacl
+-// runtarget
++// skip
+ 
+ // Copyright 2014 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Run the bug429.go test.
+ 
+ package main
+--- test/goprint.go
++++ test/goprint.go
+@@ -3,19 +3,14 @@
+ // Copyright 2011 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ // Test that println can be the target of a go statement.
+ 
+ package main
+ 
+-import (
+-	"runtime"
+-	"time"
+-)
++import "time"
+ 
+ func main() {
+ 	go println(42, true, false, true, 1.5, "world", (chan int)(nil), []int(nil), (map[string]int)(nil), (func())(nil), byte(255))
+-	for runtime.NumGoroutine() > 1 {
+-		time.Sleep(10*time.Millisecond)
+-	}
++	time.Sleep(100*time.Millisecond)
+ }
diff --git a/go/patch/go6.patch b/go/patch/go6.patch
index 7f1e4c0..9f32ed8 100644
--- a/go/patch/go6.patch
+++ b/go/patch/go6.patch
@@ -1,65 +1,230 @@
-runtime: deadlock detection does not work when using external linker.
+all: disable some tests that have trouble running remotely.
 
+--- src/encoding/gob/encoder_test.go
++++ src/encoding/gob/encoder_test.go
+@@ -1125,20 +1125,17 @@ func TestBadData(t *testing.T) {
+ 		if !strings.Contains(err.Error(), test.error) {
+ 			t.Errorf("#%d: decode: expected %q error, got %s", i, test.error, err.Error())
+ 		}
+ 	}
+ }
+ 
+ // TestHugeWriteFails tests that enormous messages trigger an error.
+ func TestHugeWriteFails(t *testing.T) {
+-	if testing.Short() {
+-		// Requires allocating a monster, so don't do this from all.bash.
+-		t.Skip("skipping huge allocation in short mode")
+-	}
++	t.Skip("skipping test due to huge memory requirement")
+ 	huge := make([]byte, tooBig)
+ 	huge[0] = 7 // Make sure it's not all zeros.
+ 	buf := new(bytes.Buffer)
+ 	err := NewEncoder(buf).Encode(huge)
+ 	if err == nil {
+ 		t.Fatalf("expected error for huge slice")
+ 	}
+ 	if !strings.Contains(err.Error(), "message too big") {
+--- src/runtime/crash_cgo_test.go
++++ src/runtime/crash_cgo_test.go
+@@ -246,20 +246,17 @@ func TestCgoCCodeSIGPROF(t *testing.T) {
+ 	got := runTestProg(t, "testprogcgo", "CgoCCodeSIGPROF")
+ 	want := "OK\n"
+ 	if got != want {
+ 		t.Errorf("expected %q got %v", want, got)
+ 	}
+ }
+ 
+ func TestCgoCrashTraceback(t *testing.T) {
+-	t.Parallel()
+-	if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le") {
+-		t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH)
+-	}
++	t.Skipf("skip running remotely")
+ 	got := runTestProg(t, "testprogcgo", "CrashTraceback")
+ 	for i := 1; i <= 3; i++ {
+ 		if !strings.Contains(got, fmt.Sprintf("cgo symbolizer:%d", i)) {
+ 			t.Errorf("missing cgo symbolizer:%d", i)
+ 		}
+ 	}
+ }
+ 
+@@ -268,20 +265,17 @@ func TestCgoTracebackContext(t *testing.T) {
+ 	got := runTestProg(t, "testprogcgo", "TracebackContext")
+ 	want := "OK\n"
+ 	if got != want {
+ 		t.Errorf("expected %q got %v", want, got)
+ 	}
+ }
+ 
+ func testCgoPprof(t *testing.T, buildArg, runArg string) {
+-	t.Parallel()
+-	if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le") {
+-		t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH)
+-	}
++	t.Skipf("skip pprof test")
+ 	testenv.MustHaveGoRun(t)
+ 
+ 	exe, err := buildTestProg(t, "testprogcgo", buildArg)
+ 	if err != nil {
+ 		t.Fatal(err)
+ 	}
+ 
+ 	got, err := testenv.CleanCmdEnv(goExecCmd(exe, runArg)).CombinedOutput()
 --- src/runtime/crash_test.go
 +++ src/runtime/crash_test.go
-@@ -177,22 +177,27 @@ func testDeadlock(t *testing.T, name string) {
+@@ -476,16 +476,17 @@ func TestPanicDeadlockSyscall(t *testing.T) {
+ func TestPanicLoop(t *testing.T) {
+ 	output := runTestProg(t, "testprog", "PanicLoop")
+ 	if want := "panic while printing panic value"; !strings.Contains(output, want) {
+ 		t.Errorf("output does not contain %q:\n%s", want, output)
+ 	}
  }
  
- func TestSimpleDeadlock(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	testDeadlock(t, "SimpleDeadlock")
+ func TestMemPprof(t *testing.T) {
++	t.Skipf("skip pprof test")
+ 	testenv.MustHaveGoRun(t)
+ 
+ 	exe, err := buildTestProg(t, "testprog")
+ 	if err != nil {
+ 		t.Fatal(err)
+ 	}
+ 
+ 	got, err := testenv.CleanCmdEnv(goExecCmd(exe, "MemProf")).CombinedOutput()
+--- src/runtime/crash_unix_test.go
++++ src/runtime/crash_unix_test.go
+@@ -169,19 +169,17 @@ func loop(i int, c chan bool) {
+ 
+ func TestPanicSystemstack(t *testing.T) {
+ 	// Test that GOTRACEBACK=crash prints both the system and user
+ 	// stack of other threads.
+ 
+ 	// The GOTRACEBACK=crash handler takes 0.1 seconds even if
+ 	// it's not writing a core file and potentially much longer if
+ 	// it is. Skip in short mode.
+-	if testing.Short() {
+-		t.Skip("Skipping in short mode (GOTRACEBACK=crash is slow)")
+-	}
++	t.Skip("Skipping (GOTRACEBACK=crash hangs on arm)")
+ 
+ 	if runtime.Sigisblocked(int(syscall.SIGQUIT)) {
+ 		t.Skip("skipping; SIGQUIT is blocked, see golang.org/issue/19196")
+ 	}
+ 
+ 	t.Parallel()
+ 	cmd := exec.Command(os.Args[0], "testPanicSystemstackInternal")
+ 	cmd = testenv.CleanCmdEnv(cmd)
+@@ -239,16 +237,17 @@ func init() {
  }
  
- func TestInitDeadlock(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	testDeadlock(t, "InitDeadlock")
+ func testPanicSystemstackInternal() {
+ 	runtime.BlockOnSystemStack()
+ 	os.Exit(1) // Should be unreachable.
  }
  
- func TestLockedDeadlock(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	testDeadlock(t, "LockedDeadlock")
+ func TestSignalExitStatus(t *testing.T) {
++	t.Skipf("skip running remotely")
+ 	testenv.MustHaveGoBuild(t)
+ 	exe, err := buildTestProg(t, "testprog")
+ 	if err != nil {
+ 		t.Fatal(err)
+ 	}
+ 	err = testenv.CleanCmdEnv(goExecCmd(exe, "SignalExitStatus")).Run()
+ 	if err == nil {
+ 		t.Error("test program succeeded unexpectedly")
+--- src/runtime/fastlog2_test.go
++++ src/runtime/fastlog2_test.go
+@@ -11,21 +11,17 @@ import (
+ )
+ 
+ func TestFastLog2(t *testing.T) {
+ 	// Compute the euclidean distance between math.Log2 and the FastLog2
+ 	// implementation over the range of interest for heap sampling.
+ 	const randomBitCount = 26
+ 	var e float64
+ 
+-	inc := 1
+-	if testing.Short() {
+-		// Check 1K total values, down from 64M.
+-		inc = 1 << 16
+-	}
++	inc := 1 << 16
+ 	for i := 1; i < 1<<randomBitCount; i += inc {
+ 		l, fl := math.Log2(float64(i)), runtime.Fastlog2(float64(i))
+ 		d := l - fl
+ 		e += d * d
+ 	}
+ 	e = math.Sqrt(e)
+ 
+ 	if e > 1.0 {
+--- src/runtime/hash_test.go
++++ src/runtime/hash_test.go
+@@ -156,19 +156,17 @@ func TestSmhasherZeros(t *testing.T) {
+ 	for i := 0; i <= N; i++ {
+ 		h.addB(b[:i])
+ 	}
+ 	h.check(t)
  }
  
- func TestLockedDeadlock2(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	testDeadlock(t, "LockedDeadlock2")
+ // Strings with up to two nonzero bytes all have distinct hashes.
+ func TestSmhasherTwoNonzero(t *testing.T) {
+-	if testing.Short() {
+-		t.Skip("Skipping in short mode")
+-	}
++	t.Skip("skipping test due to huge memory requirement")
+ 	h := newHashSet()
+ 	for n := 2; n <= 16; n++ {
+ 		twoNonZero(h, n)
+ 	}
+ 	h.check(t)
+ }
+ func twoNonZero(h *HashSet, n int) {
+ 	b := make([]byte, n)
+@@ -259,19 +257,17 @@ func setbits(h *HashSet, b []byte, i int, k int) {
+ 		setbits(h, b, j+1, k-1)
+ 		b[j/8] &= byte(^(1 << uint(j&7)))
+ 	}
  }
  
- func TestGoexitDeadlock(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	output := runTestProg(t, "testprog", "GoexitDeadlock")
- 	want := "no goroutines (main called runtime.Goexit) - deadlock!"
- 	if !strings.Contains(output, want) {
-@@ -229,6 +234,7 @@ panic: again
+ // Test all possible combinations of n blocks from the set s.
+ // "permutation" is a bad name here, but it is what Smhasher uses.
+ func TestSmhasherPermutation(t *testing.T) {
+-	if testing.Short() {
+-		t.Skip("Skipping in short mode")
+-	}
++	t.Skip("skipping test due to huge memory requirement")
+ 	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
+ 	permutation(t, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
+ 	permutation(t, []uint32{0, 1}, 20)
+ 	permutation(t, []uint32{0, 1 << 31}, 20)
+ 	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 6)
+ }
+ func permutation(t *testing.T, s []uint32, n int) {
+ 	b := make([]byte, n*4)
+--- src/runtime/pprof/pprof_test.go
++++ src/runtime/pprof/pprof_test.go
+@@ -278,24 +278,17 @@ func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Dur
+ 	return ok
  }
  
- func TestGoexitCrash(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	output := runTestProg(t, "testprog", "GoexitExit")
- 	want := "no goroutines (main called runtime.Goexit) - deadlock!"
- 	if !strings.Contains(output, want) {
-@@ -285,6 +291,7 @@ func TestBreakpoint(t *testing.T) {
- }
+ // Fork can hang if preempted with signals frequently enough (see issue 5517).
+ // Ensure that we do not do this.
+ func TestCPUProfileWithFork(t *testing.T) {
+ 	testenv.MustHaveExec(t)
  
- func TestGoexitInPanic(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	// see issue 8774: this code used to trigger an infinite recursion
- 	output := runTestProg(t, "testprog", "GoexitInPanic")
- 	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
-@@ -303,6 +310,7 @@ func TestPanicAfterGoexit(t *testing.T) {
- }
- 
- func TestRecoveredPanicAfterGoexit(t *testing.T) {
-+	t.Skip("deadlock detection fails with external linker")
- 	output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit")
- 	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
- 	if !strings.HasPrefix(output, want) {
---- test/fixedbugs/bug429_run.go
-+++ test/fixedbugs/bug429_run.go
-@@ -1,5 +1,5 @@
- // +build !nacl
--// runtarget
-+// skip
- 
- // Copyright 2014 The Go Authors. All rights reserved.
- // Use of this source code is governed by a BSD-style
+-	heap := 1 << 30
+-	if runtime.GOOS == "android" {
+-		// Use smaller size for Android to avoid crash.
+-		heap = 100 << 20
+-	}
+-	if testing.Short() {
+-		heap = 100 << 20
+-	}
++	heap := 100 << 20
+ 	// This makes fork slower.
+ 	garbage := make([]byte, heap)
+ 	// Need to touch the slice, otherwise it won't be paged in.
+ 	done := make(chan bool)
+ 	go func() {
+ 		for i := range garbage {
+ 			garbage[i] = 42
+ 		}
diff --git a/go/patch/go7.patch b/go/patch/go7.patch
deleted file mode 100644
index 7b769cf..0000000
--- a/go/patch/go7.patch
+++ /dev/null
@@ -1,139 +0,0 @@
-all: disable some tests that take a long time or allocate a lot of memory.
-
---- src/encoding/gob/encoder_test.go
-+++ src/encoding/gob/encoder_test.go
-@@ -1003,10 +1003,7 @@ func TestBadData(t *testing.T) {
- 
- // TestHugeWriteFails tests that enormous messages trigger an error.
- func TestHugeWriteFails(t *testing.T) {
--	if testing.Short() {
--		// Requires allocating a monster, so don't do this from all.bash.
--		t.Skip("skipping huge allocation in short mode")
--	}
-+	t.Skip("skipping test due to huge memory requirement")
- 	huge := make([]byte, tooBig)
- 	huge[0] = 7 // Make sure it's not all zeros.
- 	buf := new(bytes.Buffer)
---- src/math/big/float_test.go
-+++ src/math/big/float_test.go
-@@ -1428,10 +1428,7 @@ func TestFloatQuo(t *testing.T) {
- // TestFloatQuoSmoke tests all divisions x/y for values x, y in the range [-n, +n];
- // it serves as a smoke test for basic correctness of division.
- func TestFloatQuoSmoke(t *testing.T) {
--	n := 1000
--	if testing.Short() {
--		n = 10
--	}
-+	n := 10
- 
- 	const dprec = 3         // max. precision variation
- 	const prec = 10 + dprec // enough bits to hold n precisely
---- src/math/big/rat_test.go
-+++ src/math/big/rat_test.go
-@@ -430,10 +430,7 @@ func TestFloat64Distribution(t *testing.T) {
- 		9,
- 		11,
- 	}
--	var winc, einc = uint64(1), 1 // soak test (~75s on x86-64)
--	if testing.Short() {
--		winc, einc = 10, 500 // quick test (~12ms on x86-64)
--	}
-+	var winc, einc = uint64(10), 500
- 
- 	for _, sign := range "+-" {
- 		for _, a := range add {
---- src/math/big/ratconv_test.go
-+++ src/math/big/ratconv_test.go
-@@ -344,9 +344,7 @@ func isFinite(f float64) bool {
- func TestFloat32SpecialCases(t *testing.T) {
- 	for _, input := range float64inputs {
- 		if strings.HasPrefix(input, "long:") {
--			if testing.Short() {
--				continue
--			}
-+			continue
- 			input = input[len("long:"):]
- 		}
- 
-@@ -400,9 +398,7 @@ func TestFloat32SpecialCases(t *testing.T) {
- func TestFloat64SpecialCases(t *testing.T) {
- 	for _, input := range float64inputs {
- 		if strings.HasPrefix(input, "long:") {
--			if testing.Short() {
--				continue
--			}
-+			continue
- 			input = input[len("long:"):]
- 		}
- 
---- src/net/dial_test.go
-+++ src/net/dial_test.go
-@@ -77,10 +77,7 @@ func TestSelfConnect(t *testing.T) {
- 	l.Close()
- 
- 	// Try to connect to that address repeatedly.
--	n := 100000
--	if testing.Short() {
--		n = 1000
--	}
-+	n := 1000
- 	switch runtime.GOOS {
- 	case "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "plan9", "solaris", "windows":
- 		// Non-Linux systems take a long time to figure
---- src/runtime/fastlog2_test.go
-+++ src/runtime/fastlog2_test.go
-@@ -16,11 +16,7 @@ func TestFastLog2(t *testing.T) {
- 	const randomBitCount = 26
- 	var e float64
- 
--	inc := 1
--	if testing.Short() {
--		// Check 1K total values, down from 64M.
--		inc = 1 << 16
--	}
-+	inc := 1 << 16
- 	for i := 1; i < 1<<randomBitCount; i += inc {
- 		l, fl := math.Log2(float64(i)), runtime.Fastlog2(float64(i))
- 		d := l - fl
---- src/runtime/hash_test.go
-+++ src/runtime/hash_test.go
-@@ -126,9 +126,7 @@ func TestSmhasherZeros(t *testing.T) {
- 
- // Strings with up to two nonzero bytes all have distinct hashes.
- func TestSmhasherTwoNonzero(t *testing.T) {
--	if testing.Short() {
--		t.Skip("Skipping in short mode")
--	}
-+	t.Skip("skipping test due to huge memory requirement")
- 	h := newHashSet()
- 	for n := 2; n <= 16; n++ {
- 		twoNonZero(h, n)
-@@ -229,9 +227,7 @@ func setbits(h *HashSet, b []byte, i int, k int) {
- // Test all possible combinations of n blocks from the set s.
- // "permutation" is a bad name here, but it is what Smhasher uses.
- func TestSmhasherPermutation(t *testing.T) {
--	if testing.Short() {
--		t.Skip("Skipping in short mode")
--	}
-+	t.Skip("skipping test due to huge memory requirement")
- 	permutation(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
- 	permutation(t, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
- 	permutation(t, []uint32{0, 1}, 20)
---- src/runtime/pprof/pprof_test.go
-+++ src/runtime/pprof/pprof_test.go
-@@ -257,14 +257,7 @@ func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Dur
- func TestCPUProfileWithFork(t *testing.T) {
- 	testenv.MustHaveExec(t)
- 
--	heap := 1 << 30
--	if runtime.GOOS == "android" {
--		// Use smaller size for Android to avoid crash.
--		heap = 100 << 20
--	}
--	if testing.Short() {
--		heap = 100 << 20
--	}
-+	heap := 100 << 20
- 	// This makes fork slower.
- 	garbage := make([]byte, heap)
- 	// Need to touch the slice, otherwise it won't be paged in.
diff --git a/go/push_goroot b/go/push_goroot
index 41612f7..0d7706e 100755
--- a/go/push_goroot
+++ b/go/push_goroot
@@ -11,24 +11,19 @@
 # It uses "target_sh" to remotely execute commands on the device.
 # It uses "target_cp" to transfer files to the device.
 
-goroot="$(target_tmpdir)/go"
+goroot="$(target_tmpdir)/goroot"
 for target in "$@"
 do
-	echo -n "pushing to ${target} ... "
+	echo -n "pushing goroot to ${target} ... "
 	target_sh ${target} "rm -rf ${goroot}"
 	target_sh ${target} "mkdir -p ${goroot}/pkg"
 
-	pkgdir="$(go_${target} env GOOS)_$(go_${target} env GOARCH)"
-	if [[ -d "pkg/${pkgdir}_shared" ]]
-	then
-		target_cp "pkg/${pkgdir}_shared" ${target}:${goroot}/pkg
-		target_sh ${target} "ln -s ${pkgdir}_shared ${goroot}/pkg/${pkgdir}"
-	else
-		target_cp "pkg/${pkgdir}" ${target}:${goroot}/pkg
-	fi
+	cd "$(go_${target} env GOROOT)"
+	pkgdir="pkg/$(go_${target} env GOOS)_$(go_${target} env GOARCH)"
+	target_cp "${pkgdir}" ${target}:${goroot}/pkg
 
 	target_cp "src" ${target}:${goroot}
 	target_cp "lib" ${target}:${goroot}
-	target_cp "test" ${target}:${goroot}
+	[[ -d test ]] && target_cp "test" ${target}:${goroot}
 	echo "done"
 done
diff --git a/go/test_go b/go/test_go
index 3740c1b..548712f 100755
--- a/go/test_go
+++ b/go/test_go
@@ -49,6 +49,7 @@
 do
 	echo
 	echo "## ${target}"
+	push_goroot ${target}
 
 	echo
 	echo "# test"
@@ -77,12 +78,4 @@
 	echo
 	echo "# misc/cgo/{test,testtls,nocgo}"
 	GOTRACEBACK=2 go_test ./misc/cgo/{test,testtls,nocgo}
-
-	echo
-	echo "# misc/cgo/testcshared"
-	(cd misc/cgo/testcshared && target="${target}" ./test.bash)
-
-	echo
-	echo "# misc/cgo/testsigfwd"
-	(cd misc/cgo/testsigfwd && go_${target} run -exec="go_${target}_exec" main.go)
 done
diff --git a/image_chromeos.py b/image_chromeos.py
index f65ad4d..aa8824b 100755
--- a/image_chromeos.py
+++ b/image_chromeos.py
@@ -12,6 +12,7 @@
 
 import argparse
 import filecmp
+import getpass
 import glob
 import os
 import re
@@ -64,6 +65,46 @@
     logger.GetLogger().LogOutput('Failed to disable beeps.')
 
 
+def FindChromeOSImage(image_file, chromeos_root):
+  """Find path for ChromeOS image inside chroot.
+
+  This function could be called with image paths that are either inside
+  or outside the chroot.  In either case the path needs to be translated
+  to a real/absolute path inside the chroot.
+  Example input paths:
+  /usr/local/google/home/uname/chromeos/chroot/tmp/my-test-images/image
+  ~/trunk/src/build/images/board/latest/image
+  /tmp/peppy-release/R67-1235.0.0/image
+
+  Corresponding example output paths:
+  /tmp/my-test-images/image
+  /home/uname/trunk/src/build/images/board/latest/image
+  /tmp/peppy-release/R67-1235.0.0/image
+  """
+
+  # Get the name of the user, for "/home/<user>" part of the path.
+  whoami = getpass.getuser()
+  # Get the full path for the chroot dir, including 'chroot'
+  real_chroot_dir = os.path.join(os.path.realpath(chromeos_root), 'chroot')
+  # Get the full path for the chromeos root, excluding 'chroot'
+  real_chromeos_root = os.path.realpath(chromeos_root)
+
+  # If path name starts with real_chroot_dir, remove that piece, but assume
+  # the rest of the path is correct.
+  if image_file.find(real_chroot_dir) != -1:
+    chroot_image = image_file[len(real_chroot_dir):]
+  # If path name starts with chromeos_root, excluding 'chroot', replace the
+  # chromeos_root with the prefix: '/home/<username>/trunk'.
+  elif image_file.find(real_chromeos_root) != -1:
+    chroot_image = image_file[len(real_chromeos_root):]
+    chroot_image = '/home/%s/trunk%s' % (whoami, chroot_image)
+  # Else assume the path is already internal, so leave it alone.
+  else:
+    chroot_image = image_file
+
+  return chroot_image
+
+
 def DoImage(argv):
   """Image ChromeOS."""
 
@@ -178,8 +219,10 @@
         reimage = True
         l.LogOutput('Checksums do not match. Re-imaging...')
 
+        chroot_image = FindChromeOSImage(located_image, options.chromeos_root)
+
         is_test_image = IsImageModdedForTest(options.chromeos_root,
-                                             located_image, log_level)
+                                             chroot_image, log_level)
 
         if not is_test_image and not options.force:
           logger.GetLogger().LogFatal('Have to pass --force to image a '
@@ -199,16 +242,6 @@
           os.path.realpath(options.chromeos_root), 'src')
       real_chroot_dir = os.path.join(
           os.path.realpath(options.chromeos_root), 'chroot')
-      if local_image:
-        if located_image.find(real_src_dir) != 0:
-          if located_image.find(real_chroot_dir) != 0:
-            raise RuntimeError('Located image: %s not in chromeos_root: %s' %
-                               (located_image, options.chromeos_root))
-          else:
-            chroot_image = located_image[len(real_chroot_dir):]
-        else:
-          chroot_image = os.path.join(
-              '~/trunk/src', located_image[len(real_src_dir):].lstrip('/'))
 
       # Check to see if cros flash will work for the remote machine.
       CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
@@ -247,11 +280,6 @@
       if log_level == 'average':
         cmd_executer.SetLogLevel(log_level)
 
-      if found == False:
-        temp_dir = os.path.dirname(located_image)
-        l.LogOutput('Deleting temp image dir: %s' % temp_dir)
-        shutil.rmtree(temp_dir)
-
       logger.GetLogger().LogFatalIf(ret, 'Image command failed')
 
       # Unfortunately cros_image_to_target.py sometimes returns early when the
@@ -277,12 +305,17 @@
             machine=options.remote)
         logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.')
 
-        successfully_imaged = VerifyChromeChecksum(options.chromeos_root, image,
-                                                   options.remote, log_level)
+        successfully_imaged = VerifyChromeChecksum(
+            options.chromeos_root, chroot_image, options.remote, log_level)
         logger.GetLogger().LogFatalIf(not successfully_imaged,
                                       'Image verification failed!')
         TryRemountPartitionAsRW(options.chromeos_root, options.remote,
                                 log_level)
+
+      if found == False:
+        temp_dir = os.path.dirname(located_image)
+        l.LogOutput('Deleting temp image dir: %s' % temp_dir)
+        shutil.rmtree(temp_dir)
     else:
       l.LogOutput('Checksums match. Skipping reimage')
     return ret
@@ -329,13 +362,12 @@
 def GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp):
   image_dir = os.path.dirname(image)
   image_file = os.path.basename(image)
-  mount_command = ('cd %s/src/scripts &&'
+  mount_command = ('cd ~/trunk/src/scripts &&'
                    './mount_gpt_image.sh --from=%s --image=%s'
                    ' --safe --read_only'
                    ' --rootfs_mountpt=%s'
-                   ' --stateful_mountpt=%s' %
-                   (chromeos_root, image_dir, image_file, rootfs_mp,
-                    stateful_mp))
+                   ' --stateful_mountpt=%s' % (image_dir, image_file, rootfs_mp,
+                                               stateful_mp))
   return mount_command
 
 
@@ -344,37 +376,63 @@
                rootfs_mp,
                stateful_mp,
                log_level,
-               unmount=False):
+               unmount=False,
+               extra_commands=''):
   cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
   command = GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp)
   if unmount:
     command = '%s --unmount' % command
-  ret = cmd_executer.RunCommand(command)
+  if extra_commands:
+    command = '%s ; %s' % (command, extra_commands)
+  ret, out, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command)
   logger.GetLogger().LogFatalIf(ret, 'Mount/unmount command failed!')
-  return ret
+  return out
 
 
 def IsImageModdedForTest(chromeos_root, image, log_level):
   if log_level != 'verbose':
     log_level = 'quiet'
-  rootfs_mp = tempfile.mkdtemp()
-  stateful_mp = tempfile.mkdtemp()
-  MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level)
+  command = 'mktemp -d'
+  cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
+  _, rootfs_mp, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command)
+  _, stateful_mp, _ = cmd_executer.ChrootRunCommandWOutput(
+      chromeos_root, command)
+  rootfs_mp = rootfs_mp.strip()
+  stateful_mp = stateful_mp.strip()
   lsb_release_file = os.path.join(rootfs_mp, 'etc/lsb-release')
-  lsb_release_contents = open(lsb_release_file).read()
-  is_test_image = re.search('test', lsb_release_contents, re.IGNORECASE)
+  extra = (
+      'grep CHROMEOS_RELEASE_DESCRIPTION %s | grep -i test' % lsb_release_file)
+  output = MountImage(
+      chromeos_root,
+      image,
+      rootfs_mp,
+      stateful_mp,
+      log_level,
+      extra_commands=extra)
+  is_test_image = re.search('test', output, re.IGNORECASE)
   MountImage(
       chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True)
   return is_test_image
 
 
 def VerifyChromeChecksum(chromeos_root, image, remote, log_level):
+  command = 'mktemp -d'
   cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
-  rootfs_mp = tempfile.mkdtemp()
-  stateful_mp = tempfile.mkdtemp()
-  MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level)
-  image_chrome_checksum = FileUtils().Md5File(
-      '%s/opt/google/chrome/chrome' % rootfs_mp, log_level=log_level)
+  _, rootfs_mp, _ = cmd_executer.ChrootRunCommandWOutput(chromeos_root, command)
+  _, stateful_mp, _ = cmd_executer.ChrootRunCommandWOutput(
+      chromeos_root, command)
+  rootfs_mp = rootfs_mp.strip()
+  stateful_mp = stateful_mp.strip()
+  chrome_file = '%s/opt/google/chrome/chrome' % rootfs_mp
+  extra = 'md5sum %s' % chrome_file
+  out = MountImage(
+      chromeos_root,
+      image,
+      rootfs_mp,
+      stateful_mp,
+      log_level,
+      extra_commands=extra)
+  image_chrome_checksum = out.strip().split()[0]
   MountImage(
       chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True)
 
diff --git a/llvm_extra/create_ebuild_file.py b/llvm_extra/create_ebuild_file.py
new file mode 100755
index 0000000..459e702
--- /dev/null
+++ b/llvm_extra/create_ebuild_file.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python2
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import os
+import sys
+
+# This script takes an existing host llvm compiler ebuild and
+# creates another build that should be installable in a prefixed location.
+# The script patches a few lines in the llvm ebuild to make that happen.
+#
+# Since the script is based on the current llvm ebuild patterns,
+# it may need to be updated if those patterns change.
+#
+# This script should normally be invoked by the shell script
+# create_llvm_extra.sh .
+
+"""
+Below is an example of the expected diff of the newly generated ebuild with
+some explanation of the diffs.
+
+diff -Nuar llvm-pre7.0_pre335547_p20180529.ebuild newly-created-file.ebuild
+--- llvm-7.0_pre331547_p20180529-r8.ebuild
++++ newly-created-file.ebuild
+
+@@ -60,9 +60,9 @@ EGIT_REPO_URIS=(
+ fi
+
+ LICENSE="UoI-NCSA"
+-SLOT="0/${PV%%_*}"
++SLOT="${PV%%_p[[:digit:]]*}" # Creates a unique slot so that multiple copies
+                                of the new build can be installed.
+
+ KEYWORDS="-* amd64"
+
+ # Change USE flags to match llvm ebuild installation. To see the set of flags
+ enabled in llvm compiler ebuild, run $ sudo emerge -pv llvm
+
+-IUSE="debug +default-compiler-rt +default-libcxx doc libedit +libffi multitarget
++IUSE="debug +default-compiler-rt +default-libcxx doc libedit +libffi +multitarget
+        ncurses ocaml python llvm-next llvm-tot test xml video_cards_radeon"
+
+ COMMON_DEPEND="
+@@ -145,6 +145,7 @@ pkg_pretend() {
+ }
+
+ pkg_setup() {
+ # This Change is to install the files in $PREFIX.
++       export PREFIX="/usr/${PN}/${SLOT}"
+        pkg_pretend
+ }
+
+@@ -272,13 +273,13 @@
+        sed -e "/RUN/s/-warn-error A//" -i test/Bindings/OCaml/*ml  || die
+
+        # Allow custom cmake build types (like 'Gentoo')
+ # Convert use of PN to llvm in epatch commands.
+-       epatch "${FILESDIR}"/cmake/${PN}-3.8-allow_custom_cmake_build_types.patch
++       epatch "${FILESDIR}"/cmake/llvm-3.8-allow_custom_cmake_build_types.patch
+
+        # crbug/591436
+        epatch "${FILESDIR}"/clang-executable-detection.patch
+
+        # crbug/606391
+-       epatch "${FILESDIR}"/${PN}-3.8-invocation.patch
++       epatch "${FILESDIR}"/llvm-3.8-invocation.patch
+
+@@ -411,11 +412,14 @@ src_install() {
+                /usr/include/llvm/Config/llvm-config.h
+        )
+
++       MULTILIB_CHOST_TOOLS=() # No need to install any multilib tools/headers.
++       MULTILIB_WRAPPED_HEADERS=()
+        multilib-minimal_src_install
+ }
+
+ multilib_src_install() {
+        cmake-utils_src_install
++       return # No need to install any wrappers.
+
+        local wrapper_script=clang_host_wrapper
+        cat "${FILESDIR}/clang_host_wrapper.header" \
+@@ -434,6 +438,7 @@ multilib_src_install() {
+ }
+
+ multilib_src_install_all() {
++       return # No need to install common multilib files.
+        insinto /usr/share/vim/vimfiles
+        doins -r utils/vim/*/.
+        # some users may find it useful
+"""
+
+def process_line(line, text):
+  # Process the line and append to the text we want to generate.
+  # Check if line has any patterns that we want to handle.
+  newline = line.strip()
+  if newline.startswith('#'):
+    # Do not process comment lines.
+    text.append(line)
+  elif line.startswith('SLOT='):
+    # Change SLOT to "${PV%%_p[[:digit:]]*}"
+    SLOT_STRING='SLOT="${PV%%_p[[:digit:]]*}"\n'
+    text.append(SLOT_STRING)
+  elif line.startswith('IUSE') and 'multitarget' in line:
+    # Enable multitarget USE flag.
+    newline = line.replace('multitarget', '+multitarget')
+    text.append(newline)
+  elif line.startswith('pkg_setup()'):
+    # Setup PREFIX.
+    text.append(line)
+    text.append('\texport PREFIX="/usr/${PN}/${SLOT}"\n')
+  elif line.startswith('multilib_src_install_all()'):
+    text.append(line)
+    # Do not install any common files.
+    text.append('\treturn\n')
+  elif 'epatch ' in line:
+    # Convert any $PN or ${PN} in epatch files to llvm.
+    newline = line.replace('$PN', 'llvm')
+    newline = newline.replace('${PN}', 'llvm')
+    text.append(newline)
+  elif 'multilib-minimal_src_install' in line:
+    # Disable MULTILIB_CHOST_TOOLS and MULTILIB_WRAPPED_HEADERS
+    text.append('\tMULTILIB_CHOST_TOOLS=()\n')
+    text.append('\tMULTILIB_WRAPPED_HEADERS=()\n')
+    text.append(line)
+  elif 'cmake-utils_src_install' in line:
+    text.append(line)
+    # Do not install any wrappers.
+    text.append('\treturn\n')
+  else:
+    text.append(line)
+
+
+def main():
+  if len(sys.argv) != 3:
+     filename = os.path.basename(__file__)
+     print ('Usage: ', filename,' <input.ebuild> <output.ebuild>')
+     return 1
+
+  text = []
+  with open(sys.argv[1], 'r') as infile:
+    for line in infile:
+      process_line(line, text)
+
+  with open(sys.argv[2], 'w') as outfile:
+    outfile.write("".join(text))
+
+  return 0
+
+
+if __name__== "__main__":
+  sys.exit(main())
diff --git a/llvm_extra/create_llvm_extra.sh b/llvm_extra/create_llvm_extra.sh
new file mode 100755
index 0000000..6f34a0b
--- /dev/null
+++ b/llvm_extra/create_llvm_extra.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script takes an existing llvm ebuild file and generate a llvm-extra
+# ebuild. The newly generated llvm-extra ebuild can be installed as a regular
+# host package.
+# The new ebuild should be generated in sys-devel/llvm-extra directory.
+# The script also copies all the files from files/ directory.
+# The generated llvm-extra ebuild is slotted so multiple instances of
+# llvm-extra ebuilds can be installed at same time.
+# The slot is derived based on the _pre<num> string in the llvm ebuild name.
+# e.g. For llvm-7.0_pre331547_p20180529-r8.ebuild, the slot will be
+# 7.0_pre331547.
+#
+# Usage:
+#  ./create_llvm_extra.sh /path/to/llvm-7.0_pre331547_p20180529-r8.ebuild
+#
+# To use the clang installed by llvm-extra, modify the CFLAGS and
+# LDFLAGS of a package to pass the path of the clang binary installed by
+# the llvm-extra package.
+# e.g. append-flags -Xclang-path=/usr/llvm-extra/version/clang
+#      append-ldflags -Xclang-path=/usr/llvm-extra/version/clang
+#
+
+SCRIPT_DIR=$(realpath $(dirname "$0"))
+
+function check_cmd() {
+	if [[ "$#" -ne 1 ]]; then
+		echo "Exactly 1 argument expected"
+		echo "Usage $0 <path_to_llvm_ebuild>"
+		exit 1
+	fi
+	if [[ ! -f "$1" ]]; then
+		echo "$1 is not a file"
+		exit 1;
+	fi
+}
+
+function create_llvm_extra_ebuild() {
+	EBUILD_PREFIX=llvm-extra
+	EBUILD_DIR=$(dirname "$1")
+	EBUILD_FILE_NAME=$(basename "$1")
+	NEW_EBUILD_FILE_NAME="${EBUILD_FILE_NAME/llvm/$EBUILD_PREFIX}"
+	NEW_EBUILD_FILENAME_NO_EXT="${NEW_EBUILD_FILE_NAME%.*}"
+	NEW_EBUILD_DIR="${EBUILD_DIR}/../${EBUILD_PREFIX}"
+	NEW_EBUILD_PV="${NEW_EBUILD_FILENAME_NO_EXT#"$EBUILD_PREFIX-"}"
+	NEW_EBUILD_SLOT="${NEW_EBUILD_PV%%_p[[:digit:]]*}"
+
+	mkdir -p "${NEW_EBUILD_DIR}"
+	if [[ -d "${EBUILD_DIR}/files" ]]; then
+		cp -rf "${EBUILD_DIR}/files" "${NEW_EBUILD_DIR}"
+	fi
+
+	if [[ -f "${NEW_EBUILD_DIR}/${NEW_EBUILD_FILE_NAME}" ]]; then
+		echo "Removing existing ebuild file ${NEW_EBUILD_FILE_NAME}"
+		rm -f "${NEW_EBUILD_DIR}/${NEW_EBUILD_FILE_NAME}"
+	fi
+	# Generate the llvm-extra ebuild file.
+	"${SCRIPT_DIR}"/create_ebuild_file.py "$1" "${NEW_EBUILD_DIR}/${NEW_EBUILD_FILE_NAME}"
+	if [[ $? -ne 0 ]]; then
+		echo "Creation of ${NEW_EBUILD_DIR}/${NEW_EBUILD_FILE_NAME} failed"
+		exit 1
+	fi
+	echo "***"
+	echo "***"
+	echo "${NEW_EBUILD_DIR}/${NEW_EBUILD_FILE_NAME} has been created."
+
+	echo "***"
+	echo "Test if it builds by running \$ sudo emerge ${EBUILD_PREFIX}:${NEW_EBUILD_SLOT}"
+	echo "***"
+	echo "If it works, Go ahead and submit the newly generated ebuild"\
+	     "and any other files in ${NEW_EBUILD_DIR}."
+	echo "***"
+	echo "Don't forget to add sys-devel/${EBUILD_PREFIX}:${NEW_EBUILD_SLOT} to"\
+	     "the dependencies in virtual/target-chromium-os-sdk ebuild."
+	echo "***"
+	echo "***"
+}
+
+
+set -e
+# Sanity checks.
+check_cmd "${@}"
+# Create llvm-extra ebuild.
+create_llvm_extra_ebuild "${@}"
diff --git a/new-generate-waterfall-reports.py b/new-generate-waterfall-reports.py
new file mode 100755
index 0000000..ef48f8b
--- /dev/null
+++ b/new-generate-waterfall-reports.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python2
+"""Generate summary report for ChromeOS toolchain waterfalls."""
+
+from __future__ import print_function
+
+import argparse
+import datetime
+import getpass
+import json
+import os
+import re
+import shutil
+import sys
+import time
+
+from cros_utils import command_executer
+
+# All the test suites whose data we might want for the reports.
+TESTS = (('bvt-inline', 'HWTest [bvt-inline]'), ('bvt-cq', 'HWTest [bvt-cq]'),
+         ('security', 'HWTest [security]'))
+
+# The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
+# LISTED IN THE REPORT.
+WATERFALL_BUILDERS = [
+    'amd64-llvm-next-toolchain',
+    'arm-llvm-next-toolchain',
+    'arm64-llvm-next-toolchain',
+]
+
+DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
+ARCHIVE_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-reports/'
+DOWNLOAD_DIR = '/tmp/waterfall-logs'
+MAX_SAVE_RECORDS = 7
+BUILD_DATA_FILE = '%s/build-data.txt' % DATA_DIR
+LLVM_ROTATING_BUILDER = 'llvm_next_toolchain'
+ROTATING_BUILDERS = [LLVM_ROTATING_BUILDER]
+
+# For int-to-string date conversion.  Note, the index of the month in this
+# list needs to correspond to the month's integer value.  i.e. 'Sep' must
+# be at MONTHS[9].
+MONTHS = [
+    '', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
+    'Nov', 'Dec'
+]
+
+DAYS_PER_MONTH = {
+    1: 31,
+    2: 28,
+    3: 31,
+    4: 30,
+    5: 31,
+    6: 30,
+    7: 31,
+    8: 31,
+    9: 30,
+    10: 31,
+    11: 30,
+    12: 31
+}
+
+
+def format_date(int_date, use_int_month=False):
+  """Convert an integer date to a string date. YYYYMMDD -> YYYY-MMM-DD"""
+
+  if int_date == 0:
+    return 'today'
+
+  tmp_date = int_date
+  day = tmp_date % 100
+  tmp_date = tmp_date / 100
+  month = tmp_date % 100
+  year = tmp_date / 100
+
+  if use_int_month:
+    date_str = '%d-%02d-%02d' % (year, month, day)
+  else:
+    month_str = MONTHS[month]
+    date_str = '%d-%s-%d' % (year, month_str, day)
+  return date_str
+
+
+def EmailReport(report_file, report_type, date, email_to):
+  """Emails the report to the appropriate address."""
+  subject = '%s Waterfall Summary report, %s' % (report_type, date)
+  sendgmr_path = '/google/data/ro/projects/gws-sre/sendgmr'
+  command = ('%s --to=%s --subject="%s" --body_file=%s' %
+             (sendgmr_path, email_to, subject, report_file))
+  command_executer.GetCommandExecuter().RunCommand(command)
+
+
+def GetColor(status):
+  """Given a job status string, returns appropriate color string."""
+  if status.strip() == 'pass':
+    color = 'green '
+  elif status.strip() == 'fail':
+    color = ' red  '
+  elif status.strip() == 'warning':
+    color = 'orange'
+  else:
+    color = '      '
+  return color
+
+
+def GenerateWaterfallReport(report_dict, waterfall_type, date):
+  """Write out the actual formatted report."""
+
+  filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date)
+
+  date_string = ''
+  report_list = report_dict.keys()
+
+  with open(filename, 'w') as out_file:
+    # Write Report Header
+    out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' %
+                   (waterfall_type, date_string))
+    out_file.write('                                                        \n')
+    out_file.write(
+        '                                         Build       bvt-    '
+        '     bvt-cq     '
+        ' security \n')
+    out_file.write(
+        '                                         status     inline   '
+        '              \n')
+
+    # Write daily waterfall status section.
+    for builder in report_list:
+      build_dict = report_dict[builder]
+      buildbucket_id = build_dict['buildbucket_id']
+      overall_status = build_dict['status']
+      if 'bvt-inline' in build_dict.keys():
+        inline_status = build_dict['bvt-inline']
+      else:
+        inline_status = '    '
+      if 'bvt-cq' in build_dict.keys():
+        cq_status = build_dict['bvt-cq']
+      else:
+        cq_status = '    '
+      if 'security' in build_dict.keys():
+        security_status = build_dict['security']
+      else:
+        security_status = '    '
+      inline_color = GetColor(inline_status)
+      cq_color = GetColor(cq_status)
+      security_color = GetColor(security_status)
+
+      out_file.write(
+          '%26s   %4s        %6s        %6s         %6s\n' %
+          (builder, overall_status, inline_color, cq_color, security_color))
+      if waterfall_type == 'main':
+        out_file.write('     build url: https://cros-goldeneye.corp.google.com/'
+                       'chromeos/healthmonitoring/buildDetails?buildbucketId=%s'
+                       '\n' % buildbucket_id)
+      else:
+        out_file.write('     build url: https://ci.chromium.org/p/chromeos/'
+                       'builds/b%s \n' % buildbucket_id)
+        report_url = ('https://logs.chromium.org/v/?s=chromeos%2Fbuildbucket%2F'
+                      'cr-buildbucket.appspot.com%2F' + buildbucket_id +
+                      '%2F%2B%2Fsteps%2FReport%2F0%2Fstdout')
+        out_file.write('\n     report status url: %s\n' % report_url)
+      out_file.write('\n')
+
+    print('Report generated in %s.' % filename)
+    return filename
+
+
+def GetTryjobData(date, rotating_builds_dict):
+  """Read buildbucket id and board from stored file.
+
+  buildbot_test_llvm.py, when it launches the rotating builders,
+  records the buildbucket_id and board for each launch in a file.
+  This reads that data out of the file so we can find the right
+  tryjob data.
+  """
+
+  date_str = format_date(date, use_int_month=True)
+  fname = '%s.builds' % date_str
+  filename = os.path.join(DATA_DIR, 'rotating-builders', fname)
+
+  if not os.path.exists(filename):
+    print('Cannot find file: %s' % filename)
+    print('Unable to generate rotating builder report for date %d.' % date)
+    return
+
+  with open(filename, 'r') as in_file:
+    lines = in_file.readlines()
+
+  for line in lines:
+    l = line.strip()
+    parts = l.split(',')
+    if len(parts) != 2:
+      print('Warning: Illegal line in data file.')
+      print('File: %s' % filename)
+      print('Line: %s' % l)
+      continue
+    buildbucket_id = parts[0]
+    board = parts[1]
+    rotating_builds_dict[board] = buildbucket_id
+
+  return
+
+
+def GetRotatingBuildData(date, report_dict, chromeos_root, board,
+                         buildbucket_id, ce):
+  """Gets rotating builder job results via 'cros buildresult'."""
+  path = os.path.join(chromeos_root, 'chromite')
+  save_dir = os.getcwd()
+  date_str = format_date(date, use_int_month=True)
+  os.chdir(path)
+
+  command = (
+      'cros buildresult --buildbucket-id %s --report json' % buildbucket_id)
+  _, out, _ = ce.RunCommandWOutput(command)
+  tmp_dict = json.loads(out)
+  results = tmp_dict[buildbucket_id]
+
+  board_dict = dict()
+  board_dict['buildbucket_id'] = buildbucket_id
+  stages_results = results['stages']
+  for test in TESTS:
+    key1 = test[0]
+    key2 = test[1]
+    if key2 in stages_results:
+      board_dict[key1] = stages_results[key2]
+  board_dict['status'] = results['status']
+  report_dict[board] = board_dict
+  os.chdir(save_dir)
+  return
+
+
+def GetMainWaterfallData(date, report_dict, chromeos_root, ce):
+  """Gets main waterfall job results via 'cros buildresult'."""
+  path = os.path.join(chromeos_root, 'chromite')
+  save_dir = os.getcwd()
+  date_str = format_date(date, use_int_month=True)
+  os.chdir(path)
+  for builder in WATERFALL_BUILDERS:
+    command = ('cros buildresult --build-config %s --date %s --report json' %
+               (builder, date_str))
+    _, out, _ = ce.RunCommandWOutput(command)
+    tmp_dict = json.loads(out)
+    builder_dict = dict()
+    for k in tmp_dict.keys():
+      buildbucket_id = k
+      results = tmp_dict[k]
+
+    builder_dict['buildbucket_id'] = buildbucket_id
+    builder_dict['status'] = results['status']
+    stages_results = results['stages']
+    for test in TESTS:
+      key1 = test[0]
+      key2 = test[1]
+      builder_dict[key1] = stages_results[key2]
+    report_dict[builder] = builder_dict
+  os.chdir(save_dir)
+  return
+
+
def CheckProdAccess():
  """Returns True iff a current (unexpired) prodaccess credential exists."""
  exit_code, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
      'prodcertstatus')
  # 'prodcertstatus' exits non-zero when no cert is present; when one is
  # present its output mentions when it 'expires'.  Both must hold.
  return exit_code == 0 and 'expires' in out
+
+
def ValidDate(date):
  """Checks that 'date' (int, YYYYMMDD) is a real, usable calendar date.

  A date is valid when the year is between 2018 and the current year
  inclusive, the month is 1-12, and the day exists in that month.

  Args:
    date: integer encoded as YYYYMMDD, e.g. 20180907.

  Returns:
    True if the date is valid, False otherwise.
  """
  min_year = 2018
  # Use floor division so the fields stay ints under Python 3 as well
  # ('/' was Python-2 floor division in the original).
  day = date % 100
  month = (date // 100) % 100
  year = date // 10000

  if day < 1 or month < 1 or year < min_year:
    return False

  if year > datetime.datetime.now().year:
    return False

  if month > 12:
    return False

  # Full Gregorian leap rule, applied to the date's own year.  The
  # original tested the *current* year and omitted the 400-year rule.
  if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
    max_day = 29
  else:
    # Days in each month, indexed by month number (index 0 unused).
    max_day = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)[month]

  return day <= max_day
+
+
def ValidOptions(parser, options):
  """Error-check the options passed to this script.

  Calls parser.error (which exits) on the first problem found; also
  returns False if any check fails, True when all options are usable.
  """
  too_many_options = options.main and options.rotating
  if too_many_options:
    parser.error('Can only specify one of --main, --rotating.')

  if not os.path.exists(options.chromeos_root):
    parser.error(
        'Invalid chromeos root. Cannot find: %s' % options.chromeos_root)

  email_ok = True
  if options.email and '@' not in options.email:
    email_ok = False
    parser.error('"%s" is not a valid email address; it must contain "@..."' %
                 options.email)

  return not too_many_options and ValidDate(options.date) and email_ok
+
+
def Main(argv):
  """Entry point: gather waterfall/tryjob results and email reports.

  Args:
    argv: command-line arguments (without the program name).

  Returns:
    0 on success; 1 on invalid options or missing prodaccess.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--main',
      dest='main',
      default=False,
      action='store_true',
      help='Generate report only for main waterfall '
      'builders.')
  parser.add_argument(
      '--rotating',
      dest='rotating',
      default=False,
      action='store_true',
      help='Generate report only for rotating builders.')
  parser.add_argument(
      '--date',
      dest='date',
      required=True,
      type=int,
      help='The date YYYYMMDD of waterfall report.')
  parser.add_argument(
      '--email',
      dest='email',
      default='',
      help='Email address to use for sending the report.')
  parser.add_argument(
      '--chromeos_root',
      dest='chromeos_root',
      required=True,
      help='Chrome OS root in which to run chroot commands.')

  options = parser.parse_args(argv)

  if not ValidOptions(parser, options):
    return 1

  main_only = options.main
  rotating_only = options.rotating
  date = options.date

  # Build results live behind prodaccess; fail early (and non-zero) if the
  # credential is stale.  The original returned None here, which callers
  # could not distinguish from success.
  if not CheckProdAccess():
    print('ERROR: Please run prodaccess first.')
    return 1

  waterfall_report_dict = dict()
  rotating_report_dict = dict()

  ce = command_executer.GetCommandExecuter()
  if not rotating_only:
    GetMainWaterfallData(date, waterfall_report_dict, options.chromeos_root, ce)

  if not main_only:
    rotating_builds_dict = dict()
    GetTryjobData(date, rotating_builds_dict)
    # Iterating an empty dict is a no-op, so no explicit length guard needed.
    for board, buildbucket_id in rotating_builds_dict.items():
      GetRotatingBuildData(date, rotating_report_dict, options.chromeos_root,
                           board, buildbucket_id, ce)

  # Default to mailing the report to the invoking user.
  email_to = options.email if options.email else getpass.getuser()

  if waterfall_report_dict and not rotating_only:
    main_report = GenerateWaterfallReport(waterfall_report_dict, 'main', date)

    EmailReport(main_report, 'Main', format_date(date), email_to)
    shutil.copy(main_report, ARCHIVE_DIR)
  if rotating_report_dict and not main_only:
    rotating_report = GenerateWaterfallReport(rotating_report_dict, 'rotating',
                                              date)

    EmailReport(rotating_report, 'Rotating', format_date(date), email_to)
    shutil.copy(rotating_report, ARCHIVE_DIR)
  return 0
+
+
if __name__ == '__main__':
  # Propagate Main's status as the process exit code.  The original always
  # called sys.exit(0), so failures (e.g. bad options) still exited 0.
  # sys.exit(None) also exits 0, so a successful Main needs no special case.
  sys.exit(Main(sys.argv[1:]))
diff --git a/pyrun b/pyrun
index d9f5d75..9aac2f2 100755
--- a/pyrun
+++ b/pyrun
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-DIRECTORY=$(cd `dirname $0` && pwd)
-PYTHONPATH=$DIRECTORY:$PYTHONPATH
+DIRECTORY=$(cd "$(dirname "$0")" && pwd)
+export PYTHONPATH="${DIRECTORY}:${PYTHONPATH}"
 
-$*
+"$@"